From 20a4e41872a5207a31511c80ab649df9b5fec298 Mon Sep 17 00:00:00 2001 From: erio Date: Mon, 20 Apr 2026 20:21:02 +0800 Subject: [PATCH] feat(monitor): admin channel monitor MVP with SSRF protection and batch aggregation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 新增 admin「渠道监控」模块(参考 BingZi-233/check-cx),独立于现有 Channel 体系。 admin 配置 + 后台定时调用上游 LLM chat completions 健康检查 + 所有登录用户只读可见。 后端: - ent: channel_monitor + channel_monitor_history(AES-256-GCM 加密 api_key) - service 按职责拆分:service/aggregator/validate/checker/runner/ssrf - provider strategy map 替代 switch(openai/anthropic/gemini) - repository batch 聚合(ListLatestForMonitorIDs + ComputeAvailabilityForMonitors)消除 N+1 - runner: ticker(5s) + pond worker pool(5) + inFlight 防并发 + TrySubmit 防雪崩 + 凌晨 3 点 cron 清理 30 天历史 - SSRF 防护:强制 https + 私网/loopback/云元数据 IP 拒绝(127/8、10/8、172.16/12、 192.168/16、169.254/16、100.64/10、::1、fc00::/7、fe80::/10)+ DialContext 在 socket 层防 DNS rebinding - API key sanitize:擦除 url.Error 与上游响应 body 中的 sk-/sk-ant-/AIza/JWT 模式 - APIKeyDecryptFailed 标志位 + 单 monitor 路径检测,避免空 key 调用上游 handler: - admin: CRUD + 手动触发 + 历史接口(api_key 脱敏) - user: 只读列表 + 状态详情(去除 api_key/endpoint) - ParseChannelMonitorID 共用 + dto.ChannelMonitorExtraModelStatus 共用 前端: - 路由 /admin/channels/{pricing,monitor} + /monitor(用户只读) - AppSidebar 父项 expandOnly 支持 - ChannelMonitorView 拆为 8 个子组件 + ChannelStatusView 拆出 detail dialog - composables/useChannelMonitorFormat + constants/channelMonitor 共享 - i18n monitorCommon namespace 消除 admin/user 两 view 重复 合规:所有文件符合 CLAUDE.md(Go ≤ 500 行 / Vue ≤ 300 行 / 函数 ≤ 30 行) CI: go build / gofmt / golangci-lint(0 issues) / make test-unit / pnpm build 全绿 --- backend/cmd/server/wire_gen.go | 14 +- backend/ent/channelmonitor.go | 273 +++ backend/ent/channelmonitor/channelmonitor.go | 223 ++ backend/ent/channelmonitor/where.go | 724 ++++++ backend/ent/channelmonitor_create.go | 1270 +++++++++++ backend/ent/channelmonitor_delete.go | 88 + backend/ent/channelmonitor_query.go 
| 643 ++++++ backend/ent/channelmonitor_update.go | 918 ++++++++ backend/ent/channelmonitorhistory.go | 207 ++ .../channelmonitorhistory.go | 158 ++ backend/ent/channelmonitorhistory/where.go | 444 ++++ backend/ent/channelmonitorhistory_create.go | 947 ++++++++ backend/ent/channelmonitorhistory_delete.go | 88 + backend/ent/channelmonitorhistory_query.go | 643 ++++++ backend/ent/channelmonitorhistory_update.go | 635 ++++++ backend/ent/client.go | 366 ++- backend/ent/ent.go | 4 + backend/ent/hook/hook.go | 24 + backend/ent/intercept/intercept.go | 60 + backend/ent/migrate/schema.go | 88 +- backend/ent/mutation.go | 2032 +++++++++++++++++ backend/ent/predicate/predicate.go | 6 + backend/ent/runtime/runtime.go | 123 + backend/ent/schema/channel_monitor.go | 81 + backend/ent/schema/channel_monitor_history.go | 64 + backend/ent/tx.go | 6 + backend/go.sum | 12 + .../handler/admin/channel_monitor_handler.go | 396 ++++ .../handler/channel_monitor_user_handler.go | 127 ++ .../internal/handler/dto/channel_monitor.go | 10 + backend/internal/handler/handler.go | 2 + backend/internal/handler/wire.go | 6 + .../repository/channel_monitor_repo.go | 450 ++++ backend/internal/repository/wire.go | 1 + backend/internal/server/routes/admin.go | 16 + backend/internal/server/routes/user.go | 7 + .../service/channel_monitor_aggregator.go | 217 ++ .../service/channel_monitor_challenge.go | 80 + .../service/channel_monitor_checker.go | 299 +++ .../internal/service/channel_monitor_const.go | 137 ++ .../service/channel_monitor_runner.go | 208 ++ .../service/channel_monitor_service.go | 374 +++ .../internal/service/channel_monitor_ssrf.go | 152 ++ .../internal/service/channel_monitor_types.go | 161 ++ .../service/channel_monitor_validate.go | 99 + backend/internal/service/wire.go | 19 + .../migrations/125_add_channel_monitors.sql | 58 + frontend/src/api/admin/channelMonitor.ts | 190 ++ frontend/src/api/admin/index.ts | 3 + frontend/src/api/channelMonitor.ts | 74 + frontend/src/api/index.ts | 1 
+ .../admin/monitor/MonitorActionsCell.vue | 45 + .../admin/monitor/MonitorFiltersBar.vue | 95 + .../admin/monitor/MonitorFormDialog.vue | 297 +++ .../admin/monitor/MonitorKeyPickerDialog.vue | 64 + .../admin/monitor/MonitorPrimaryModelCell.vue | 71 + .../admin/monitor/MonitorRunResultDialog.vue | 56 + frontend/src/components/layout/AppSidebar.vue | 82 +- .../components/user/MonitorDetailDialog.vue | 114 + .../user/MonitorPrimaryModelCell.vue | 71 + .../composables/useChannelMonitorFormat.ts | 97 + frontend/src/constants/channelMonitor.ts | 35 + frontend/src/i18n/locales/en.ts | 122 +- frontend/src/i18n/locales/zh.ts | 122 +- frontend/src/router/index.ts | 27 + .../src/views/admin/ChannelMonitorView.vue | 295 +++ frontend/src/views/user/ChannelStatusView.vue | 208 ++ 67 files changed, 14997 insertions(+), 32 deletions(-) create mode 100644 backend/ent/channelmonitor.go create mode 100644 backend/ent/channelmonitor/channelmonitor.go create mode 100644 backend/ent/channelmonitor/where.go create mode 100644 backend/ent/channelmonitor_create.go create mode 100644 backend/ent/channelmonitor_delete.go create mode 100644 backend/ent/channelmonitor_query.go create mode 100644 backend/ent/channelmonitor_update.go create mode 100644 backend/ent/channelmonitorhistory.go create mode 100644 backend/ent/channelmonitorhistory/channelmonitorhistory.go create mode 100644 backend/ent/channelmonitorhistory/where.go create mode 100644 backend/ent/channelmonitorhistory_create.go create mode 100644 backend/ent/channelmonitorhistory_delete.go create mode 100644 backend/ent/channelmonitorhistory_query.go create mode 100644 backend/ent/channelmonitorhistory_update.go create mode 100644 backend/ent/schema/channel_monitor.go create mode 100644 backend/ent/schema/channel_monitor_history.go create mode 100644 backend/internal/handler/admin/channel_monitor_handler.go create mode 100644 backend/internal/handler/channel_monitor_user_handler.go create mode 100644 
backend/internal/handler/dto/channel_monitor.go create mode 100644 backend/internal/repository/channel_monitor_repo.go create mode 100644 backend/internal/service/channel_monitor_aggregator.go create mode 100644 backend/internal/service/channel_monitor_challenge.go create mode 100644 backend/internal/service/channel_monitor_checker.go create mode 100644 backend/internal/service/channel_monitor_const.go create mode 100644 backend/internal/service/channel_monitor_runner.go create mode 100644 backend/internal/service/channel_monitor_service.go create mode 100644 backend/internal/service/channel_monitor_ssrf.go create mode 100644 backend/internal/service/channel_monitor_types.go create mode 100644 backend/internal/service/channel_monitor_validate.go create mode 100644 backend/migrations/125_add_channel_monitors.sql create mode 100644 frontend/src/api/admin/channelMonitor.ts create mode 100644 frontend/src/api/channelMonitor.ts create mode 100644 frontend/src/components/admin/monitor/MonitorActionsCell.vue create mode 100644 frontend/src/components/admin/monitor/MonitorFiltersBar.vue create mode 100644 frontend/src/components/admin/monitor/MonitorFormDialog.vue create mode 100644 frontend/src/components/admin/monitor/MonitorKeyPickerDialog.vue create mode 100644 frontend/src/components/admin/monitor/MonitorPrimaryModelCell.vue create mode 100644 frontend/src/components/admin/monitor/MonitorRunResultDialog.vue create mode 100644 frontend/src/components/user/MonitorDetailDialog.vue create mode 100644 frontend/src/components/user/MonitorPrimaryModelCell.vue create mode 100644 frontend/src/composables/useChannelMonitorFormat.ts create mode 100644 frontend/src/constants/channelMonitor.ts create mode 100644 frontend/src/views/admin/ChannelMonitorView.vue create mode 100644 frontend/src/views/user/ChannelStatusView.vue diff --git a/backend/cmd/server/wire_gen.go b/backend/cmd/server/wire_gen.go index 3b474c4a..8e367e81 100644 --- a/backend/cmd/server/wire_gen.go +++ 
b/backend/cmd/server/wire_gen.go @@ -210,6 +210,16 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { scheduledTestService := service.ProvideScheduledTestService(scheduledTestPlanRepository, scheduledTestResultRepository) scheduledTestHandler := admin.NewScheduledTestHandler(scheduledTestService) channelHandler := admin.NewChannelHandler(channelService, billingService) + sqlDB, err := repository.ProvideSQLDB(client) + if err != nil { + return nil, err + } + channelMonitorRepository := repository.NewChannelMonitorRepository(client, sqlDB) + channelMonitorService := service.ProvideChannelMonitorService(channelMonitorRepository, secretEncryptor) + channelMonitorHandler := admin.NewChannelMonitorHandler(channelMonitorService) + channelMonitorUserHandler := handler.NewChannelMonitorUserHandler(channelMonitorService) + channelMonitorRunner := service.ProvideChannelMonitorRunner(channelMonitorService) + _ = channelMonitorRunner registry := payment.ProvideRegistry() encryptionKey, err := payment.ProvideEncryptionKey(configConfig) if err != nil { @@ -221,7 +231,7 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { settingHandler := admin.NewSettingHandler(settingService, emailService, turnstileService, opsService, paymentConfigService, paymentService) paymentOrderExpiryService := service.ProvidePaymentOrderExpiryService(paymentService) paymentHandler := admin.NewPaymentHandler(paymentService, paymentConfigService) - adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, adminAnnouncementHandler, dataManagementHandler, backupHandler, oAuthHandler, openAIOAuthHandler, geminiOAuthHandler, antigravityOAuthHandler, proxyHandler, adminRedeemHandler, promoHandler, settingHandler, opsHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler, userAttributeHandler, errorPassthroughHandler, tlsFingerprintProfileHandler, adminAPIKeyHandler, 
scheduledTestHandler, channelHandler, paymentHandler) + adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, adminAnnouncementHandler, dataManagementHandler, backupHandler, oAuthHandler, openAIOAuthHandler, geminiOAuthHandler, antigravityOAuthHandler, proxyHandler, adminRedeemHandler, promoHandler, settingHandler, opsHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler, userAttributeHandler, errorPassthroughHandler, tlsFingerprintProfileHandler, adminAPIKeyHandler, scheduledTestHandler, channelHandler, channelMonitorHandler, paymentHandler) usageRecordWorkerPool := service.NewUsageRecordWorkerPool(configConfig) userMsgQueueCache := repository.NewUserMsgQueueCache(redisClient) userMessageQueueService := service.ProvideUserMessageQueueService(userMsgQueueCache, rpmCache, configConfig) @@ -233,7 +243,7 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { paymentWebhookHandler := handler.NewPaymentWebhookHandler(paymentService, registry) idempotencyCoordinator := service.ProvideIdempotencyCoordinator(idempotencyRepository, configConfig) idempotencyCleanupService := service.ProvideIdempotencyCleanupService(idempotencyRepository, configConfig) - handlers := handler.ProvideHandlers(authHandler, userHandler, apiKeyHandler, usageHandler, redeemHandler, subscriptionHandler, announcementHandler, adminHandlers, gatewayHandler, openAIGatewayHandler, handlerSettingHandler, totpHandler, handlerPaymentHandler, paymentWebhookHandler, idempotencyCoordinator, idempotencyCleanupService) + handlers := handler.ProvideHandlers(authHandler, userHandler, apiKeyHandler, usageHandler, redeemHandler, subscriptionHandler, announcementHandler, channelMonitorUserHandler, adminHandlers, gatewayHandler, openAIGatewayHandler, handlerSettingHandler, totpHandler, handlerPaymentHandler, paymentWebhookHandler, idempotencyCoordinator, idempotencyCleanupService) jwtAuthMiddleware := 
middleware.NewJWTAuthMiddleware(authService, userService) adminAuthMiddleware := middleware.NewAdminAuthMiddleware(authService, userService, settingService) apiKeyAuthMiddleware := middleware.NewAPIKeyAuthMiddleware(apiKeyService, subscriptionService, configConfig) diff --git a/backend/ent/channelmonitor.go b/backend/ent/channelmonitor.go new file mode 100644 index 00000000..292c2b28 --- /dev/null +++ b/backend/ent/channelmonitor.go @@ -0,0 +1,273 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/channelmonitor" +) + +// ChannelMonitor is the model entity for the ChannelMonitor schema. +type ChannelMonitor struct { + config `json:"-"` + // ID of the ent. + ID int64 `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // Name holds the value of the "name" field. + Name string `json:"name,omitempty"` + // Provider holds the value of the "provider" field. + Provider channelmonitor.Provider `json:"provider,omitempty"` + // Provider base origin, e.g. https://api.openai.com + Endpoint string `json:"endpoint,omitempty"` + // AES-256-GCM encrypted API key + APIKeyEncrypted string `json:"-"` + // PrimaryModel holds the value of the "primary_model" field. + PrimaryModel string `json:"primary_model,omitempty"` + // Additional model names to test alongside primary_model + ExtraModels []string `json:"extra_models,omitempty"` + // GroupName holds the value of the "group_name" field. + GroupName string `json:"group_name,omitempty"` + // Enabled holds the value of the "enabled" field. + Enabled bool `json:"enabled,omitempty"` + // IntervalSeconds holds the value of the "interval_seconds" field. 
+ IntervalSeconds int `json:"interval_seconds,omitempty"` + // LastCheckedAt holds the value of the "last_checked_at" field. + LastCheckedAt *time.Time `json:"last_checked_at,omitempty"` + // CreatedBy holds the value of the "created_by" field. + CreatedBy int64 `json:"created_by,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the ChannelMonitorQuery when eager-loading is set. + Edges ChannelMonitorEdges `json:"edges"` + selectValues sql.SelectValues +} + +// ChannelMonitorEdges holds the relations/edges for other nodes in the graph. +type ChannelMonitorEdges struct { + // History holds the value of the history edge. + History []*ChannelMonitorHistory `json:"history,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// HistoryOrErr returns the History value or an error if the edge +// was not loaded in eager-loading. +func (e ChannelMonitorEdges) HistoryOrErr() ([]*ChannelMonitorHistory, error) { + if e.loadedTypes[0] { + return e.History, nil + } + return nil, &NotLoadedError{edge: "history"} +} + +// scanValues returns the types for scanning values from sql.Rows. 
+func (*ChannelMonitor) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case channelmonitor.FieldExtraModels: + values[i] = new([]byte) + case channelmonitor.FieldEnabled: + values[i] = new(sql.NullBool) + case channelmonitor.FieldID, channelmonitor.FieldIntervalSeconds, channelmonitor.FieldCreatedBy: + values[i] = new(sql.NullInt64) + case channelmonitor.FieldName, channelmonitor.FieldProvider, channelmonitor.FieldEndpoint, channelmonitor.FieldAPIKeyEncrypted, channelmonitor.FieldPrimaryModel, channelmonitor.FieldGroupName: + values[i] = new(sql.NullString) + case channelmonitor.FieldCreatedAt, channelmonitor.FieldUpdatedAt, channelmonitor.FieldLastCheckedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the ChannelMonitor fields. +func (_m *ChannelMonitor) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case channelmonitor.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int64(value.Int64) + case channelmonitor.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + case channelmonitor.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + _m.UpdatedAt = value.Time + } + case channelmonitor.FieldName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field 
name", values[i]) + } else if value.Valid { + _m.Name = value.String + } + case channelmonitor.FieldProvider: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field provider", values[i]) + } else if value.Valid { + _m.Provider = channelmonitor.Provider(value.String) + } + case channelmonitor.FieldEndpoint: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field endpoint", values[i]) + } else if value.Valid { + _m.Endpoint = value.String + } + case channelmonitor.FieldAPIKeyEncrypted: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field api_key_encrypted", values[i]) + } else if value.Valid { + _m.APIKeyEncrypted = value.String + } + case channelmonitor.FieldPrimaryModel: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field primary_model", values[i]) + } else if value.Valid { + _m.PrimaryModel = value.String + } + case channelmonitor.FieldExtraModels: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field extra_models", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &_m.ExtraModels); err != nil { + return fmt.Errorf("unmarshal field extra_models: %w", err) + } + } + case channelmonitor.FieldGroupName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field group_name", values[i]) + } else if value.Valid { + _m.GroupName = value.String + } + case channelmonitor.FieldEnabled: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field enabled", values[i]) + } else if value.Valid { + _m.Enabled = value.Bool + } + case channelmonitor.FieldIntervalSeconds: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field interval_seconds", values[i]) + } else if value.Valid 
{ + _m.IntervalSeconds = int(value.Int64) + } + case channelmonitor.FieldLastCheckedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field last_checked_at", values[i]) + } else if value.Valid { + _m.LastCheckedAt = new(time.Time) + *_m.LastCheckedAt = value.Time + } + case channelmonitor.FieldCreatedBy: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field created_by", values[i]) + } else if value.Valid { + _m.CreatedBy = value.Int64 + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the ChannelMonitor. +// This includes values selected through modifiers, order, etc. +func (_m *ChannelMonitor) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// QueryHistory queries the "history" edge of the ChannelMonitor entity. +func (_m *ChannelMonitor) QueryHistory() *ChannelMonitorHistoryQuery { + return NewChannelMonitorClient(_m.config).QueryHistory(_m) +} + +// Update returns a builder for updating this ChannelMonitor. +// Note that you need to call ChannelMonitor.Unwrap() before calling this method if this ChannelMonitor +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *ChannelMonitor) Update() *ChannelMonitorUpdateOne { + return NewChannelMonitorClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the ChannelMonitor entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *ChannelMonitor) Unwrap() *ChannelMonitor { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: ChannelMonitor is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. 
+func (_m *ChannelMonitor) String() string { + var builder strings.Builder + builder.WriteString("ChannelMonitor(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("name=") + builder.WriteString(_m.Name) + builder.WriteString(", ") + builder.WriteString("provider=") + builder.WriteString(fmt.Sprintf("%v", _m.Provider)) + builder.WriteString(", ") + builder.WriteString("endpoint=") + builder.WriteString(_m.Endpoint) + builder.WriteString(", ") + builder.WriteString("api_key_encrypted=") + builder.WriteString(", ") + builder.WriteString("primary_model=") + builder.WriteString(_m.PrimaryModel) + builder.WriteString(", ") + builder.WriteString("extra_models=") + builder.WriteString(fmt.Sprintf("%v", _m.ExtraModels)) + builder.WriteString(", ") + builder.WriteString("group_name=") + builder.WriteString(_m.GroupName) + builder.WriteString(", ") + builder.WriteString("enabled=") + builder.WriteString(fmt.Sprintf("%v", _m.Enabled)) + builder.WriteString(", ") + builder.WriteString("interval_seconds=") + builder.WriteString(fmt.Sprintf("%v", _m.IntervalSeconds)) + builder.WriteString(", ") + if v := _m.LastCheckedAt; v != nil { + builder.WriteString("last_checked_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + builder.WriteString("created_by=") + builder.WriteString(fmt.Sprintf("%v", _m.CreatedBy)) + builder.WriteByte(')') + return builder.String() +} + +// ChannelMonitors is a parsable slice of ChannelMonitor. 
+type ChannelMonitors []*ChannelMonitor diff --git a/backend/ent/channelmonitor/channelmonitor.go b/backend/ent/channelmonitor/channelmonitor.go new file mode 100644 index 00000000..c5ab8199 --- /dev/null +++ b/backend/ent/channelmonitor/channelmonitor.go @@ -0,0 +1,223 @@ +// Code generated by ent, DO NOT EDIT. + +package channelmonitor + +import ( + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the channelmonitor type in the database. + Label = "channel_monitor" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldName holds the string denoting the name field in the database. + FieldName = "name" + // FieldProvider holds the string denoting the provider field in the database. + FieldProvider = "provider" + // FieldEndpoint holds the string denoting the endpoint field in the database. + FieldEndpoint = "endpoint" + // FieldAPIKeyEncrypted holds the string denoting the api_key_encrypted field in the database. + FieldAPIKeyEncrypted = "api_key_encrypted" + // FieldPrimaryModel holds the string denoting the primary_model field in the database. + FieldPrimaryModel = "primary_model" + // FieldExtraModels holds the string denoting the extra_models field in the database. + FieldExtraModels = "extra_models" + // FieldGroupName holds the string denoting the group_name field in the database. + FieldGroupName = "group_name" + // FieldEnabled holds the string denoting the enabled field in the database. + FieldEnabled = "enabled" + // FieldIntervalSeconds holds the string denoting the interval_seconds field in the database. 
+ FieldIntervalSeconds = "interval_seconds" + // FieldLastCheckedAt holds the string denoting the last_checked_at field in the database. + FieldLastCheckedAt = "last_checked_at" + // FieldCreatedBy holds the string denoting the created_by field in the database. + FieldCreatedBy = "created_by" + // EdgeHistory holds the string denoting the history edge name in mutations. + EdgeHistory = "history" + // Table holds the table name of the channelmonitor in the database. + Table = "channel_monitors" + // HistoryTable is the table that holds the history relation/edge. + HistoryTable = "channel_monitor_histories" + // HistoryInverseTable is the table name for the ChannelMonitorHistory entity. + // It exists in this package in order to avoid circular dependency with the "channelmonitorhistory" package. + HistoryInverseTable = "channel_monitor_histories" + // HistoryColumn is the table column denoting the history relation/edge. + HistoryColumn = "monitor_id" +) + +// Columns holds all SQL columns for channelmonitor fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldName, + FieldProvider, + FieldEndpoint, + FieldAPIKeyEncrypted, + FieldPrimaryModel, + FieldExtraModels, + FieldGroupName, + FieldEnabled, + FieldIntervalSeconds, + FieldLastCheckedAt, + FieldCreatedBy, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time + // NameValidator is a validator for the "name" field. 
It is called by the builders before save. + NameValidator func(string) error + // EndpointValidator is a validator for the "endpoint" field. It is called by the builders before save. + EndpointValidator func(string) error + // APIKeyEncryptedValidator is a validator for the "api_key_encrypted" field. It is called by the builders before save. + APIKeyEncryptedValidator func(string) error + // PrimaryModelValidator is a validator for the "primary_model" field. It is called by the builders before save. + PrimaryModelValidator func(string) error + // DefaultExtraModels holds the default value on creation for the "extra_models" field. + DefaultExtraModels []string + // DefaultGroupName holds the default value on creation for the "group_name" field. + DefaultGroupName string + // GroupNameValidator is a validator for the "group_name" field. It is called by the builders before save. + GroupNameValidator func(string) error + // DefaultEnabled holds the default value on creation for the "enabled" field. + DefaultEnabled bool + // IntervalSecondsValidator is a validator for the "interval_seconds" field. It is called by the builders before save. + IntervalSecondsValidator func(int) error +) + +// Provider defines the type for the "provider" enum field. +type Provider string + +// Provider values. +const ( + ProviderOpenai Provider = "openai" + ProviderAnthropic Provider = "anthropic" + ProviderGemini Provider = "gemini" +) + +func (pr Provider) String() string { + return string(pr) +} + +// ProviderValidator is a validator for the "provider" field enum values. It is called by the builders before save. +func ProviderValidator(pr Provider) error { + switch pr { + case ProviderOpenai, ProviderAnthropic, ProviderGemini: + return nil + default: + return fmt.Errorf("channelmonitor: invalid enum value for provider field: %q", pr) + } +} + +// OrderOption defines the ordering options for the ChannelMonitor queries. 
+type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByProvider orders the results by the provider field. +func ByProvider(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldProvider, opts...).ToFunc() +} + +// ByEndpoint orders the results by the endpoint field. +func ByEndpoint(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEndpoint, opts...).ToFunc() +} + +// ByAPIKeyEncrypted orders the results by the api_key_encrypted field. +func ByAPIKeyEncrypted(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAPIKeyEncrypted, opts...).ToFunc() +} + +// ByPrimaryModel orders the results by the primary_model field. +func ByPrimaryModel(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPrimaryModel, opts...).ToFunc() +} + +// ByGroupName orders the results by the group_name field. +func ByGroupName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldGroupName, opts...).ToFunc() +} + +// ByEnabled orders the results by the enabled field. +func ByEnabled(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEnabled, opts...).ToFunc() +} + +// ByIntervalSeconds orders the results by the interval_seconds field. 
+func ByIntervalSeconds(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIntervalSeconds, opts...).ToFunc() +} + +// ByLastCheckedAt orders the results by the last_checked_at field. +func ByLastCheckedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLastCheckedAt, opts...).ToFunc() +} + +// ByCreatedBy orders the results by the created_by field. +func ByCreatedBy(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedBy, opts...).ToFunc() +} + +// ByHistoryCount orders the results by history count. +func ByHistoryCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newHistoryStep(), opts...) + } +} + +// ByHistory orders the results by history terms. +func ByHistory(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newHistoryStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newHistoryStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(HistoryInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, HistoryTable, HistoryColumn), + ) +} diff --git a/backend/ent/channelmonitor/where.go b/backend/ent/channelmonitor/where.go new file mode 100644 index 00000000..8126fb77 --- /dev/null +++ b/backend/ent/channelmonitor/where.go @@ -0,0 +1,724 @@ +// Code generated by ent, DO NOT EDIT. + +package channelmonitor + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int64) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. 
+func IDEQ(id int64) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int64) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int64) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int64) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int64) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int64) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int64) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldLTE(FieldID, id)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. 
+func Name(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldEQ(FieldName, v)) +} + +// Endpoint applies equality check predicate on the "endpoint" field. It's identical to EndpointEQ. +func Endpoint(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldEQ(FieldEndpoint, v)) +} + +// APIKeyEncrypted applies equality check predicate on the "api_key_encrypted" field. It's identical to APIKeyEncryptedEQ. +func APIKeyEncrypted(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldEQ(FieldAPIKeyEncrypted, v)) +} + +// PrimaryModel applies equality check predicate on the "primary_model" field. It's identical to PrimaryModelEQ. +func PrimaryModel(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldEQ(FieldPrimaryModel, v)) +} + +// GroupName applies equality check predicate on the "group_name" field. It's identical to GroupNameEQ. +func GroupName(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldEQ(FieldGroupName, v)) +} + +// Enabled applies equality check predicate on the "enabled" field. It's identical to EnabledEQ. +func Enabled(v bool) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldEQ(FieldEnabled, v)) +} + +// IntervalSeconds applies equality check predicate on the "interval_seconds" field. It's identical to IntervalSecondsEQ. +func IntervalSeconds(v int) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldEQ(FieldIntervalSeconds, v)) +} + +// LastCheckedAt applies equality check predicate on the "last_checked_at" field. It's identical to LastCheckedAtEQ. +func LastCheckedAt(v time.Time) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldEQ(FieldLastCheckedAt, v)) +} + +// CreatedBy applies equality check predicate on the "created_by" field. It's identical to CreatedByEQ. 
+func CreatedBy(v int64) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldEQ(FieldCreatedBy, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. 
+func UpdatedAtEQ(v time.Time) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. 
+func NameIn(vs ...string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. 
+func NameContainsFold(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldContainsFold(FieldName, v)) +} + +// ProviderEQ applies the EQ predicate on the "provider" field. +func ProviderEQ(v Provider) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldEQ(FieldProvider, v)) +} + +// ProviderNEQ applies the NEQ predicate on the "provider" field. +func ProviderNEQ(v Provider) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldNEQ(FieldProvider, v)) +} + +// ProviderIn applies the In predicate on the "provider" field. +func ProviderIn(vs ...Provider) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldIn(FieldProvider, vs...)) +} + +// ProviderNotIn applies the NotIn predicate on the "provider" field. +func ProviderNotIn(vs ...Provider) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldNotIn(FieldProvider, vs...)) +} + +// EndpointEQ applies the EQ predicate on the "endpoint" field. +func EndpointEQ(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldEQ(FieldEndpoint, v)) +} + +// EndpointNEQ applies the NEQ predicate on the "endpoint" field. +func EndpointNEQ(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldNEQ(FieldEndpoint, v)) +} + +// EndpointIn applies the In predicate on the "endpoint" field. +func EndpointIn(vs ...string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldIn(FieldEndpoint, vs...)) +} + +// EndpointNotIn applies the NotIn predicate on the "endpoint" field. +func EndpointNotIn(vs ...string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldNotIn(FieldEndpoint, vs...)) +} + +// EndpointGT applies the GT predicate on the "endpoint" field. +func EndpointGT(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldGT(FieldEndpoint, v)) +} + +// EndpointGTE applies the GTE predicate on the "endpoint" field. 
+func EndpointGTE(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldGTE(FieldEndpoint, v)) +} + +// EndpointLT applies the LT predicate on the "endpoint" field. +func EndpointLT(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldLT(FieldEndpoint, v)) +} + +// EndpointLTE applies the LTE predicate on the "endpoint" field. +func EndpointLTE(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldLTE(FieldEndpoint, v)) +} + +// EndpointContains applies the Contains predicate on the "endpoint" field. +func EndpointContains(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldContains(FieldEndpoint, v)) +} + +// EndpointHasPrefix applies the HasPrefix predicate on the "endpoint" field. +func EndpointHasPrefix(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldHasPrefix(FieldEndpoint, v)) +} + +// EndpointHasSuffix applies the HasSuffix predicate on the "endpoint" field. +func EndpointHasSuffix(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldHasSuffix(FieldEndpoint, v)) +} + +// EndpointEqualFold applies the EqualFold predicate on the "endpoint" field. +func EndpointEqualFold(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldEqualFold(FieldEndpoint, v)) +} + +// EndpointContainsFold applies the ContainsFold predicate on the "endpoint" field. +func EndpointContainsFold(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldContainsFold(FieldEndpoint, v)) +} + +// APIKeyEncryptedEQ applies the EQ predicate on the "api_key_encrypted" field. +func APIKeyEncryptedEQ(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldEQ(FieldAPIKeyEncrypted, v)) +} + +// APIKeyEncryptedNEQ applies the NEQ predicate on the "api_key_encrypted" field. 
+func APIKeyEncryptedNEQ(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldNEQ(FieldAPIKeyEncrypted, v)) +} + +// APIKeyEncryptedIn applies the In predicate on the "api_key_encrypted" field. +func APIKeyEncryptedIn(vs ...string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldIn(FieldAPIKeyEncrypted, vs...)) +} + +// APIKeyEncryptedNotIn applies the NotIn predicate on the "api_key_encrypted" field. +func APIKeyEncryptedNotIn(vs ...string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldNotIn(FieldAPIKeyEncrypted, vs...)) +} + +// APIKeyEncryptedGT applies the GT predicate on the "api_key_encrypted" field. +func APIKeyEncryptedGT(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldGT(FieldAPIKeyEncrypted, v)) +} + +// APIKeyEncryptedGTE applies the GTE predicate on the "api_key_encrypted" field. +func APIKeyEncryptedGTE(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldGTE(FieldAPIKeyEncrypted, v)) +} + +// APIKeyEncryptedLT applies the LT predicate on the "api_key_encrypted" field. +func APIKeyEncryptedLT(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldLT(FieldAPIKeyEncrypted, v)) +} + +// APIKeyEncryptedLTE applies the LTE predicate on the "api_key_encrypted" field. +func APIKeyEncryptedLTE(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldLTE(FieldAPIKeyEncrypted, v)) +} + +// APIKeyEncryptedContains applies the Contains predicate on the "api_key_encrypted" field. +func APIKeyEncryptedContains(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldContains(FieldAPIKeyEncrypted, v)) +} + +// APIKeyEncryptedHasPrefix applies the HasPrefix predicate on the "api_key_encrypted" field. 
+func APIKeyEncryptedHasPrefix(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldHasPrefix(FieldAPIKeyEncrypted, v)) +} + +// APIKeyEncryptedHasSuffix applies the HasSuffix predicate on the "api_key_encrypted" field. +func APIKeyEncryptedHasSuffix(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldHasSuffix(FieldAPIKeyEncrypted, v)) +} + +// APIKeyEncryptedEqualFold applies the EqualFold predicate on the "api_key_encrypted" field. +func APIKeyEncryptedEqualFold(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldEqualFold(FieldAPIKeyEncrypted, v)) +} + +// APIKeyEncryptedContainsFold applies the ContainsFold predicate on the "api_key_encrypted" field. +func APIKeyEncryptedContainsFold(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldContainsFold(FieldAPIKeyEncrypted, v)) +} + +// PrimaryModelEQ applies the EQ predicate on the "primary_model" field. +func PrimaryModelEQ(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldEQ(FieldPrimaryModel, v)) +} + +// PrimaryModelNEQ applies the NEQ predicate on the "primary_model" field. +func PrimaryModelNEQ(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldNEQ(FieldPrimaryModel, v)) +} + +// PrimaryModelIn applies the In predicate on the "primary_model" field. +func PrimaryModelIn(vs ...string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldIn(FieldPrimaryModel, vs...)) +} + +// PrimaryModelNotIn applies the NotIn predicate on the "primary_model" field. +func PrimaryModelNotIn(vs ...string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldNotIn(FieldPrimaryModel, vs...)) +} + +// PrimaryModelGT applies the GT predicate on the "primary_model" field. 
+func PrimaryModelGT(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldGT(FieldPrimaryModel, v)) +} + +// PrimaryModelGTE applies the GTE predicate on the "primary_model" field. +func PrimaryModelGTE(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldGTE(FieldPrimaryModel, v)) +} + +// PrimaryModelLT applies the LT predicate on the "primary_model" field. +func PrimaryModelLT(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldLT(FieldPrimaryModel, v)) +} + +// PrimaryModelLTE applies the LTE predicate on the "primary_model" field. +func PrimaryModelLTE(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldLTE(FieldPrimaryModel, v)) +} + +// PrimaryModelContains applies the Contains predicate on the "primary_model" field. +func PrimaryModelContains(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldContains(FieldPrimaryModel, v)) +} + +// PrimaryModelHasPrefix applies the HasPrefix predicate on the "primary_model" field. +func PrimaryModelHasPrefix(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldHasPrefix(FieldPrimaryModel, v)) +} + +// PrimaryModelHasSuffix applies the HasSuffix predicate on the "primary_model" field. +func PrimaryModelHasSuffix(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldHasSuffix(FieldPrimaryModel, v)) +} + +// PrimaryModelEqualFold applies the EqualFold predicate on the "primary_model" field. +func PrimaryModelEqualFold(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldEqualFold(FieldPrimaryModel, v)) +} + +// PrimaryModelContainsFold applies the ContainsFold predicate on the "primary_model" field. 
+func PrimaryModelContainsFold(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldContainsFold(FieldPrimaryModel, v)) +} + +// GroupNameEQ applies the EQ predicate on the "group_name" field. +func GroupNameEQ(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldEQ(FieldGroupName, v)) +} + +// GroupNameNEQ applies the NEQ predicate on the "group_name" field. +func GroupNameNEQ(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldNEQ(FieldGroupName, v)) +} + +// GroupNameIn applies the In predicate on the "group_name" field. +func GroupNameIn(vs ...string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldIn(FieldGroupName, vs...)) +} + +// GroupNameNotIn applies the NotIn predicate on the "group_name" field. +func GroupNameNotIn(vs ...string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldNotIn(FieldGroupName, vs...)) +} + +// GroupNameGT applies the GT predicate on the "group_name" field. +func GroupNameGT(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldGT(FieldGroupName, v)) +} + +// GroupNameGTE applies the GTE predicate on the "group_name" field. +func GroupNameGTE(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldGTE(FieldGroupName, v)) +} + +// GroupNameLT applies the LT predicate on the "group_name" field. +func GroupNameLT(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldLT(FieldGroupName, v)) +} + +// GroupNameLTE applies the LTE predicate on the "group_name" field. +func GroupNameLTE(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldLTE(FieldGroupName, v)) +} + +// GroupNameContains applies the Contains predicate on the "group_name" field. 
+func GroupNameContains(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldContains(FieldGroupName, v)) +} + +// GroupNameHasPrefix applies the HasPrefix predicate on the "group_name" field. +func GroupNameHasPrefix(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldHasPrefix(FieldGroupName, v)) +} + +// GroupNameHasSuffix applies the HasSuffix predicate on the "group_name" field. +func GroupNameHasSuffix(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldHasSuffix(FieldGroupName, v)) +} + +// GroupNameIsNil applies the IsNil predicate on the "group_name" field. +func GroupNameIsNil() predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldIsNull(FieldGroupName)) +} + +// GroupNameNotNil applies the NotNil predicate on the "group_name" field. +func GroupNameNotNil() predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldNotNull(FieldGroupName)) +} + +// GroupNameEqualFold applies the EqualFold predicate on the "group_name" field. +func GroupNameEqualFold(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldEqualFold(FieldGroupName, v)) +} + +// GroupNameContainsFold applies the ContainsFold predicate on the "group_name" field. +func GroupNameContainsFold(v string) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldContainsFold(FieldGroupName, v)) +} + +// EnabledEQ applies the EQ predicate on the "enabled" field. +func EnabledEQ(v bool) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldEQ(FieldEnabled, v)) +} + +// EnabledNEQ applies the NEQ predicate on the "enabled" field. +func EnabledNEQ(v bool) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldNEQ(FieldEnabled, v)) +} + +// IntervalSecondsEQ applies the EQ predicate on the "interval_seconds" field. 
+func IntervalSecondsEQ(v int) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldEQ(FieldIntervalSeconds, v)) +} + +// IntervalSecondsNEQ applies the NEQ predicate on the "interval_seconds" field. +func IntervalSecondsNEQ(v int) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldNEQ(FieldIntervalSeconds, v)) +} + +// IntervalSecondsIn applies the In predicate on the "interval_seconds" field. +func IntervalSecondsIn(vs ...int) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldIn(FieldIntervalSeconds, vs...)) +} + +// IntervalSecondsNotIn applies the NotIn predicate on the "interval_seconds" field. +func IntervalSecondsNotIn(vs ...int) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldNotIn(FieldIntervalSeconds, vs...)) +} + +// IntervalSecondsGT applies the GT predicate on the "interval_seconds" field. +func IntervalSecondsGT(v int) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldGT(FieldIntervalSeconds, v)) +} + +// IntervalSecondsGTE applies the GTE predicate on the "interval_seconds" field. +func IntervalSecondsGTE(v int) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldGTE(FieldIntervalSeconds, v)) +} + +// IntervalSecondsLT applies the LT predicate on the "interval_seconds" field. +func IntervalSecondsLT(v int) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldLT(FieldIntervalSeconds, v)) +} + +// IntervalSecondsLTE applies the LTE predicate on the "interval_seconds" field. +func IntervalSecondsLTE(v int) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldLTE(FieldIntervalSeconds, v)) +} + +// LastCheckedAtEQ applies the EQ predicate on the "last_checked_at" field. +func LastCheckedAtEQ(v time.Time) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldEQ(FieldLastCheckedAt, v)) +} + +// LastCheckedAtNEQ applies the NEQ predicate on the "last_checked_at" field. 
+func LastCheckedAtNEQ(v time.Time) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldNEQ(FieldLastCheckedAt, v)) +} + +// LastCheckedAtIn applies the In predicate on the "last_checked_at" field. +func LastCheckedAtIn(vs ...time.Time) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldIn(FieldLastCheckedAt, vs...)) +} + +// LastCheckedAtNotIn applies the NotIn predicate on the "last_checked_at" field. +func LastCheckedAtNotIn(vs ...time.Time) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldNotIn(FieldLastCheckedAt, vs...)) +} + +// LastCheckedAtGT applies the GT predicate on the "last_checked_at" field. +func LastCheckedAtGT(v time.Time) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldGT(FieldLastCheckedAt, v)) +} + +// LastCheckedAtGTE applies the GTE predicate on the "last_checked_at" field. +func LastCheckedAtGTE(v time.Time) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldGTE(FieldLastCheckedAt, v)) +} + +// LastCheckedAtLT applies the LT predicate on the "last_checked_at" field. +func LastCheckedAtLT(v time.Time) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldLT(FieldLastCheckedAt, v)) +} + +// LastCheckedAtLTE applies the LTE predicate on the "last_checked_at" field. +func LastCheckedAtLTE(v time.Time) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldLTE(FieldLastCheckedAt, v)) +} + +// LastCheckedAtIsNil applies the IsNil predicate on the "last_checked_at" field. +func LastCheckedAtIsNil() predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldIsNull(FieldLastCheckedAt)) +} + +// LastCheckedAtNotNil applies the NotNil predicate on the "last_checked_at" field. +func LastCheckedAtNotNil() predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldNotNull(FieldLastCheckedAt)) +} + +// CreatedByEQ applies the EQ predicate on the "created_by" field. 
+func CreatedByEQ(v int64) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldEQ(FieldCreatedBy, v)) +} + +// CreatedByNEQ applies the NEQ predicate on the "created_by" field. +func CreatedByNEQ(v int64) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldNEQ(FieldCreatedBy, v)) +} + +// CreatedByIn applies the In predicate on the "created_by" field. +func CreatedByIn(vs ...int64) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldIn(FieldCreatedBy, vs...)) +} + +// CreatedByNotIn applies the NotIn predicate on the "created_by" field. +func CreatedByNotIn(vs ...int64) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldNotIn(FieldCreatedBy, vs...)) +} + +// CreatedByGT applies the GT predicate on the "created_by" field. +func CreatedByGT(v int64) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldGT(FieldCreatedBy, v)) +} + +// CreatedByGTE applies the GTE predicate on the "created_by" field. +func CreatedByGTE(v int64) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldGTE(FieldCreatedBy, v)) +} + +// CreatedByLT applies the LT predicate on the "created_by" field. +func CreatedByLT(v int64) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldLT(FieldCreatedBy, v)) +} + +// CreatedByLTE applies the LTE predicate on the "created_by" field. +func CreatedByLTE(v int64) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.FieldLTE(FieldCreatedBy, v)) +} + +// HasHistory applies the HasEdge predicate on the "history" edge. +func HasHistory() predicate.ChannelMonitor { + return predicate.ChannelMonitor(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, HistoryTable, HistoryColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasHistoryWith applies the HasEdge predicate on the "history" edge with a given conditions (other predicates). 
+func HasHistoryWith(preds ...predicate.ChannelMonitorHistory) predicate.ChannelMonitor { + return predicate.ChannelMonitor(func(s *sql.Selector) { + step := newHistoryStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.ChannelMonitor) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.ChannelMonitor) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.ChannelMonitor) predicate.ChannelMonitor { + return predicate.ChannelMonitor(sql.NotPredicates(p)) +} diff --git a/backend/ent/channelmonitor_create.go b/backend/ent/channelmonitor_create.go new file mode 100644 index 00000000..ad735f3e --- /dev/null +++ b/backend/ent/channelmonitor_create.go @@ -0,0 +1,1270 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/channelmonitor" + "github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory" +) + +// ChannelMonitorCreate is the builder for creating a ChannelMonitor entity. +type ChannelMonitorCreate struct { + config + mutation *ChannelMonitorMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetCreatedAt sets the "created_at" field. +func (_c *ChannelMonitorCreate) SetCreatedAt(v time.Time) *ChannelMonitorCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. 
// NOTE(review): generated builder setters — all values land in the mutation
// and are validated later by check(); nothing here touches the database.
func (_c *ChannelMonitorCreate) SetNillableCreatedAt(v *time.Time) *ChannelMonitorCreate {
	if v != nil {
		_c.SetCreatedAt(*v)
	}
	return _c
}

// SetUpdatedAt sets the "updated_at" field.
func (_c *ChannelMonitorCreate) SetUpdatedAt(v time.Time) *ChannelMonitorCreate {
	_c.mutation.SetUpdatedAt(v)
	return _c
}

// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
func (_c *ChannelMonitorCreate) SetNillableUpdatedAt(v *time.Time) *ChannelMonitorCreate {
	if v != nil {
		_c.SetUpdatedAt(*v)
	}
	return _c
}

// SetName sets the "name" field.
func (_c *ChannelMonitorCreate) SetName(v string) *ChannelMonitorCreate {
	_c.mutation.SetName(v)
	return _c
}

// SetProvider sets the "provider" field.
func (_c *ChannelMonitorCreate) SetProvider(v channelmonitor.Provider) *ChannelMonitorCreate {
	_c.mutation.SetProvider(v)
	return _c
}

// SetEndpoint sets the "endpoint" field.
func (_c *ChannelMonitorCreate) SetEndpoint(v string) *ChannelMonitorCreate {
	_c.mutation.SetEndpoint(v)
	return _c
}

// SetAPIKeyEncrypted sets the "api_key_encrypted" field.
func (_c *ChannelMonitorCreate) SetAPIKeyEncrypted(v string) *ChannelMonitorCreate {
	_c.mutation.SetAPIKeyEncrypted(v)
	return _c
}

// SetPrimaryModel sets the "primary_model" field.
func (_c *ChannelMonitorCreate) SetPrimaryModel(v string) *ChannelMonitorCreate {
	_c.mutation.SetPrimaryModel(v)
	return _c
}

// SetExtraModels sets the "extra_models" field.
func (_c *ChannelMonitorCreate) SetExtraModels(v []string) *ChannelMonitorCreate {
	_c.mutation.SetExtraModels(v)
	return _c
}

// SetGroupName sets the "group_name" field.
func (_c *ChannelMonitorCreate) SetGroupName(v string) *ChannelMonitorCreate {
	_c.mutation.SetGroupName(v)
	return _c
}

// SetNillableGroupName sets the "group_name" field if the given value is not nil.
func (_c *ChannelMonitorCreate) SetNillableGroupName(v *string) *ChannelMonitorCreate {
	if v != nil {
		_c.SetGroupName(*v)
	}
	return _c
}

// SetEnabled sets the "enabled" field.
func (_c *ChannelMonitorCreate) SetEnabled(v bool) *ChannelMonitorCreate {
	_c.mutation.SetEnabled(v)
	return _c
}

// SetNillableEnabled sets the "enabled" field if the given value is not nil.
func (_c *ChannelMonitorCreate) SetNillableEnabled(v *bool) *ChannelMonitorCreate {
	if v != nil {
		_c.SetEnabled(*v)
	}
	return _c
}

// SetIntervalSeconds sets the "interval_seconds" field.
func (_c *ChannelMonitorCreate) SetIntervalSeconds(v int) *ChannelMonitorCreate {
	_c.mutation.SetIntervalSeconds(v)
	return _c
}

// SetLastCheckedAt sets the "last_checked_at" field.
func (_c *ChannelMonitorCreate) SetLastCheckedAt(v time.Time) *ChannelMonitorCreate {
	_c.mutation.SetLastCheckedAt(v)
	return _c
}

// SetNillableLastCheckedAt sets the "last_checked_at" field if the given value is not nil.
func (_c *ChannelMonitorCreate) SetNillableLastCheckedAt(v *time.Time) *ChannelMonitorCreate {
	if v != nil {
		_c.SetLastCheckedAt(*v)
	}
	return _c
}

// SetCreatedBy sets the "created_by" field.
func (_c *ChannelMonitorCreate) SetCreatedBy(v int64) *ChannelMonitorCreate {
	_c.mutation.SetCreatedBy(v)
	return _c
}

// AddHistoryIDs adds the "history" edge to the ChannelMonitorHistory entity by IDs.
func (_c *ChannelMonitorCreate) AddHistoryIDs(ids ...int64) *ChannelMonitorCreate {
	_c.mutation.AddHistoryIDs(ids...)
	return _c
}

// AddHistory adds the "history" edges to the ChannelMonitorHistory entity.
func (_c *ChannelMonitorCreate) AddHistory(v ...*ChannelMonitorHistory) *ChannelMonitorCreate {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _c.AddHistoryIDs(ids...)
}

// Mutation returns the ChannelMonitorMutation object of the builder.
func (_c *ChannelMonitorCreate) Mutation() *ChannelMonitorMutation {
	return _c.mutation
}

// Save creates the ChannelMonitor in the database.
func (_c *ChannelMonitorCreate) Save(ctx context.Context) (*ChannelMonitor, error) {
	_c.defaults()
	return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks)
}

// SaveX calls Save and panics if Save returns an error.
func (_c *ChannelMonitorCreate) SaveX(ctx context.Context) *ChannelMonitor {
	v, err := _c.Save(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// Exec executes the query.
func (_c *ChannelMonitorCreate) Exec(ctx context.Context) error {
	_, err := _c.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (_c *ChannelMonitorCreate) ExecX(ctx context.Context) {
	if err := _c.Exec(ctx); err != nil {
		panic(err)
	}
}

// defaults sets the default values of the builder before save.
// NOTE(review): extra_models, group_name and enabled have schema defaults, so
// callers may omit them; created_at/updated_at default to generator functions.
func (_c *ChannelMonitorCreate) defaults() {
	if _, ok := _c.mutation.CreatedAt(); !ok {
		v := channelmonitor.DefaultCreatedAt()
		_c.mutation.SetCreatedAt(v)
	}
	if _, ok := _c.mutation.UpdatedAt(); !ok {
		v := channelmonitor.DefaultUpdatedAt()
		_c.mutation.SetUpdatedAt(v)
	}
	if _, ok := _c.mutation.ExtraModels(); !ok {
		v := channelmonitor.DefaultExtraModels
		_c.mutation.SetExtraModels(v)
	}
	if _, ok := _c.mutation.GroupName(); !ok {
		v := channelmonitor.DefaultGroupName
		_c.mutation.SetGroupName(v)
	}
	if _, ok := _c.mutation.Enabled(); !ok {
		v := channelmonitor.DefaultEnabled
		_c.mutation.SetEnabled(v)
	}
}

// check runs all checks and user-defined validators on the builder.
// NOTE(review): group_name intentionally has no missing-field check — it is
// optional with a default set in defaults(); its validator runs only when set.
func (_c *ChannelMonitorCreate) check() error {
	if _, ok := _c.mutation.CreatedAt(); !ok {
		return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "ChannelMonitor.created_at"`)}
	}
	if _, ok := _c.mutation.UpdatedAt(); !ok {
		return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "ChannelMonitor.updated_at"`)}
	}
	if _, ok := _c.mutation.Name(); !ok {
		return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "ChannelMonitor.name"`)}
	}
	if v, ok := _c.mutation.Name(); ok {
		if err := channelmonitor.NameValidator(v); err != nil {
			return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitor.name": %w`, err)}
		}
	}
	if _, ok := _c.mutation.Provider(); !ok {
		return &ValidationError{Name: "provider", err: errors.New(`ent: missing required field "ChannelMonitor.provider"`)}
	}
	if v, ok := _c.mutation.Provider(); ok {
		if err := channelmonitor.ProviderValidator(v); err != nil {
			return &ValidationError{Name: "provider", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitor.provider": %w`, err)}
		}
	}
	if _, ok := _c.mutation.Endpoint(); !ok {
		return &ValidationError{Name: "endpoint", err: errors.New(`ent: missing required field "ChannelMonitor.endpoint"`)}
	}
	if v, ok := _c.mutation.Endpoint(); ok {
		if err := channelmonitor.EndpointValidator(v); err != nil {
			return &ValidationError{Name: "endpoint", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitor.endpoint": %w`, err)}
		}
	}
	if _, ok := _c.mutation.APIKeyEncrypted(); !ok {
		return &ValidationError{Name: "api_key_encrypted", err: errors.New(`ent: missing required field "ChannelMonitor.api_key_encrypted"`)}
	}
	if v, ok := _c.mutation.APIKeyEncrypted(); ok {
		if err := channelmonitor.APIKeyEncryptedValidator(v); err != nil {
			return &ValidationError{Name: "api_key_encrypted", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitor.api_key_encrypted": %w`, err)}
		}
	}
	if _, ok := _c.mutation.PrimaryModel(); !ok {
		return &ValidationError{Name: "primary_model", err: errors.New(`ent: missing required field "ChannelMonitor.primary_model"`)}
	}
	if v, ok := _c.mutation.PrimaryModel(); ok {
		if err := channelmonitor.PrimaryModelValidator(v); err != nil {
			return &ValidationError{Name: "primary_model", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitor.primary_model": %w`, err)}
		}
	}
	if _, ok := _c.mutation.ExtraModels(); !ok {
		return &ValidationError{Name: "extra_models", err: errors.New(`ent: missing required field "ChannelMonitor.extra_models"`)}
	}
	if v, ok := _c.mutation.GroupName(); ok {
		if err := channelmonitor.GroupNameValidator(v); err != nil {
			return &ValidationError{Name: "group_name", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitor.group_name": %w`, err)}
		}
	}
	if _, ok := _c.mutation.Enabled(); !ok {
		return &ValidationError{Name: "enabled", err: errors.New(`ent: missing required field "ChannelMonitor.enabled"`)}
	}
	if _, ok := _c.mutation.IntervalSeconds(); !ok {
		return &ValidationError{Name: "interval_seconds", err: errors.New(`ent: missing required field "ChannelMonitor.interval_seconds"`)}
	}
	if v, ok := _c.mutation.IntervalSeconds(); ok {
		if err := channelmonitor.IntervalSecondsValidator(v); err != nil {
			return &ValidationError{Name: "interval_seconds", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitor.interval_seconds": %w`, err)}
		}
	}
	if _, ok := _c.mutation.CreatedBy(); !ok {
		return &ValidationError{Name: "created_by", err: errors.New(`ent: missing required field "ChannelMonitor.created_by"`)}
	}
	return nil
}

func (_c *ChannelMonitorCreate) sqlSave(ctx context.Context) (*ChannelMonitor, error) {
	if err := _c.check(); err != nil {
		return nil, err
	}
	_node, _spec := _c.createSpec()
	if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil {
		if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	// NOTE(review): int64(id) is a redundant same-type conversion emitted by
	// the ent template; harmless, left as generated.
	id := _spec.ID.Value.(int64)
	_node.ID = int64(id)
	_c.mutation.id = &_node.ID
	_c.mutation.done = true
	return _node, nil
}

func (_c *ChannelMonitorCreate) createSpec() (*ChannelMonitor, *sqlgraph.CreateSpec) {
	var (
		_node = &ChannelMonitor{config: _c.config}
		_spec = sqlgraph.NewCreateSpec(channelmonitor.Table, sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64))
	)
	_spec.OnConflict = _c.conflict
	if value, ok := _c.mutation.CreatedAt(); ok {
		_spec.SetField(channelmonitor.FieldCreatedAt, field.TypeTime, value)
		_node.CreatedAt = value
	}
	if value, ok := _c.mutation.UpdatedAt(); ok {
		_spec.SetField(channelmonitor.FieldUpdatedAt, field.TypeTime, value)
		_node.UpdatedAt = value
	}
	if value, ok := _c.mutation.Name(); ok {
		_spec.SetField(channelmonitor.FieldName, field.TypeString, value)
		_node.Name = value
	}
	if value, ok := _c.mutation.Provider(); ok {
		_spec.SetField(channelmonitor.FieldProvider, field.TypeEnum, value)
		_node.Provider = value
	}
	if value, ok := _c.mutation.Endpoint(); ok {
		_spec.SetField(channelmonitor.FieldEndpoint, field.TypeString, value)
		_node.Endpoint = value
	}
	if value, ok := _c.mutation.APIKeyEncrypted(); ok {
		_spec.SetField(channelmonitor.FieldAPIKeyEncrypted, field.TypeString, value)
		_node.APIKeyEncrypted = value
	}
	if value, ok := _c.mutation.PrimaryModel(); ok {
		_spec.SetField(channelmonitor.FieldPrimaryModel, field.TypeString, value)
		_node.PrimaryModel = value
	}
	if value, ok := _c.mutation.ExtraModels(); ok {
		_spec.SetField(channelmonitor.FieldExtraModels, field.TypeJSON, value)
		_node.ExtraModels = value
	}
	if value, ok := _c.mutation.GroupName(); ok {
		_spec.SetField(channelmonitor.FieldGroupName, field.TypeString, value)
		_node.GroupName = value
	}
	if value, ok := _c.mutation.Enabled(); ok {
		_spec.SetField(channelmonitor.FieldEnabled, field.TypeBool, value)
		_node.Enabled = value
	}
	if value, ok := _c.mutation.IntervalSeconds(); ok {
		_spec.SetField(channelmonitor.FieldIntervalSeconds, field.TypeInt, value)
		_node.IntervalSeconds = value
	}
	if value, ok := _c.mutation.LastCheckedAt(); ok {
		_spec.SetField(channelmonitor.FieldLastCheckedAt, field.TypeTime, value)
		// NOTE(review): nillable field — node stores a pointer to the local copy.
		_node.LastCheckedAt = &value
	}
	if value, ok := _c.mutation.CreatedBy(); ok {
		_spec.SetField(channelmonitor.FieldCreatedBy, field.TypeInt64, value)
		_node.CreatedBy = value
	}
	if nodes := _c.mutation.HistoryIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.O2M,
			Inverse: false,
			Table:   channelmonitor.HistoryTable,
			Columns: []string{channelmonitor.HistoryColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(channelmonitorhistory.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges = append(_spec.Edges, edge)
	}
	return _node, _spec
}

// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
// of the `INSERT` statement. For example:
//
//	client.ChannelMonitor.Create().
//		SetCreatedAt(v).
//		OnConflict(
//			// Update the row with the new values
//			// the was proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override some of the fields with custom
//		// update values.
//		Update(func(u *ent.ChannelMonitorUpsert) {
//			SetCreatedAt(v+v).
//		}).
//		Exec(ctx)
func (_c *ChannelMonitorCreate) OnConflict(opts ...sql.ConflictOption) *ChannelMonitorUpsertOne {
	_c.conflict = opts
	return &ChannelMonitorUpsertOne{
		create: _c,
	}
}

// OnConflictColumns calls `OnConflict` and configures the columns
// as conflict target. Using this option is equivalent to using:
//
//	client.ChannelMonitor.Create().
//		OnConflict(sql.ConflictColumns(columns...)).
//		Exec(ctx)
func (_c *ChannelMonitorCreate) OnConflictColumns(columns ...string) *ChannelMonitorUpsertOne {
	_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
	return &ChannelMonitorUpsertOne{
		create: _c,
	}
}

type (
	// ChannelMonitorUpsertOne is the builder for "upsert"-ing
	//  one ChannelMonitor node.
	ChannelMonitorUpsertOne struct {
		create *ChannelMonitorCreate
	}

	// ChannelMonitorUpsert is the "OnConflict" setter.
	ChannelMonitorUpsert struct {
		*sql.UpdateSet
	}
)

// SetUpdatedAt sets the "updated_at" field.
func (u *ChannelMonitorUpsert) SetUpdatedAt(v time.Time) *ChannelMonitorUpsert {
	u.Set(channelmonitor.FieldUpdatedAt, v)
	return u
}

// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
func (u *ChannelMonitorUpsert) UpdateUpdatedAt() *ChannelMonitorUpsert {
	u.SetExcluded(channelmonitor.FieldUpdatedAt)
	return u
}

// SetName sets the "name" field.
func (u *ChannelMonitorUpsert) SetName(v string) *ChannelMonitorUpsert {
	u.Set(channelmonitor.FieldName, v)
	return u
}

// UpdateName sets the "name" field to the value that was provided on create.
func (u *ChannelMonitorUpsert) UpdateName() *ChannelMonitorUpsert {
	u.SetExcluded(channelmonitor.FieldName)
	return u
}

// SetProvider sets the "provider" field.
func (u *ChannelMonitorUpsert) SetProvider(v channelmonitor.Provider) *ChannelMonitorUpsert {
	u.Set(channelmonitor.FieldProvider, v)
	return u
}

// UpdateProvider sets the "provider" field to the value that was provided on create.
func (u *ChannelMonitorUpsert) UpdateProvider() *ChannelMonitorUpsert {
	u.SetExcluded(channelmonitor.FieldProvider)
	return u
}

// SetEndpoint sets the "endpoint" field.
func (u *ChannelMonitorUpsert) SetEndpoint(v string) *ChannelMonitorUpsert {
	u.Set(channelmonitor.FieldEndpoint, v)
	return u
}

// UpdateEndpoint sets the "endpoint" field to the value that was provided on create.
// NOTE(review): ChannelMonitorUpsert methods write raw SET clauses on the
// underlying sql.UpdateSet; SetExcluded references the value proposed on insert.
func (u *ChannelMonitorUpsert) UpdateEndpoint() *ChannelMonitorUpsert {
	u.SetExcluded(channelmonitor.FieldEndpoint)
	return u
}

// SetAPIKeyEncrypted sets the "api_key_encrypted" field.
func (u *ChannelMonitorUpsert) SetAPIKeyEncrypted(v string) *ChannelMonitorUpsert {
	u.Set(channelmonitor.FieldAPIKeyEncrypted, v)
	return u
}

// UpdateAPIKeyEncrypted sets the "api_key_encrypted" field to the value that was provided on create.
func (u *ChannelMonitorUpsert) UpdateAPIKeyEncrypted() *ChannelMonitorUpsert {
	u.SetExcluded(channelmonitor.FieldAPIKeyEncrypted)
	return u
}

// SetPrimaryModel sets the "primary_model" field.
func (u *ChannelMonitorUpsert) SetPrimaryModel(v string) *ChannelMonitorUpsert {
	u.Set(channelmonitor.FieldPrimaryModel, v)
	return u
}

// UpdatePrimaryModel sets the "primary_model" field to the value that was provided on create.
func (u *ChannelMonitorUpsert) UpdatePrimaryModel() *ChannelMonitorUpsert {
	u.SetExcluded(channelmonitor.FieldPrimaryModel)
	return u
}

// SetExtraModels sets the "extra_models" field.
func (u *ChannelMonitorUpsert) SetExtraModels(v []string) *ChannelMonitorUpsert {
	u.Set(channelmonitor.FieldExtraModels, v)
	return u
}

// UpdateExtraModels sets the "extra_models" field to the value that was provided on create.
func (u *ChannelMonitorUpsert) UpdateExtraModels() *ChannelMonitorUpsert {
	u.SetExcluded(channelmonitor.FieldExtraModels)
	return u
}

// SetGroupName sets the "group_name" field.
func (u *ChannelMonitorUpsert) SetGroupName(v string) *ChannelMonitorUpsert {
	u.Set(channelmonitor.FieldGroupName, v)
	return u
}

// UpdateGroupName sets the "group_name" field to the value that was provided on create.
func (u *ChannelMonitorUpsert) UpdateGroupName() *ChannelMonitorUpsert {
	u.SetExcluded(channelmonitor.FieldGroupName)
	return u
}

// ClearGroupName clears the value of the "group_name" field.
func (u *ChannelMonitorUpsert) ClearGroupName() *ChannelMonitorUpsert {
	u.SetNull(channelmonitor.FieldGroupName)
	return u
}

// SetEnabled sets the "enabled" field.
func (u *ChannelMonitorUpsert) SetEnabled(v bool) *ChannelMonitorUpsert {
	u.Set(channelmonitor.FieldEnabled, v)
	return u
}

// UpdateEnabled sets the "enabled" field to the value that was provided on create.
func (u *ChannelMonitorUpsert) UpdateEnabled() *ChannelMonitorUpsert {
	u.SetExcluded(channelmonitor.FieldEnabled)
	return u
}

// SetIntervalSeconds sets the "interval_seconds" field.
func (u *ChannelMonitorUpsert) SetIntervalSeconds(v int) *ChannelMonitorUpsert {
	u.Set(channelmonitor.FieldIntervalSeconds, v)
	return u
}

// UpdateIntervalSeconds sets the "interval_seconds" field to the value that was provided on create.
func (u *ChannelMonitorUpsert) UpdateIntervalSeconds() *ChannelMonitorUpsert {
	u.SetExcluded(channelmonitor.FieldIntervalSeconds)
	return u
}

// AddIntervalSeconds adds v to the "interval_seconds" field.
func (u *ChannelMonitorUpsert) AddIntervalSeconds(v int) *ChannelMonitorUpsert {
	u.Add(channelmonitor.FieldIntervalSeconds, v)
	return u
}

// SetLastCheckedAt sets the "last_checked_at" field.
func (u *ChannelMonitorUpsert) SetLastCheckedAt(v time.Time) *ChannelMonitorUpsert {
	u.Set(channelmonitor.FieldLastCheckedAt, v)
	return u
}

// UpdateLastCheckedAt sets the "last_checked_at" field to the value that was provided on create.
func (u *ChannelMonitorUpsert) UpdateLastCheckedAt() *ChannelMonitorUpsert {
	u.SetExcluded(channelmonitor.FieldLastCheckedAt)
	return u
}

// ClearLastCheckedAt clears the value of the "last_checked_at" field.
func (u *ChannelMonitorUpsert) ClearLastCheckedAt() *ChannelMonitorUpsert {
	u.SetNull(channelmonitor.FieldLastCheckedAt)
	return u
}

// SetCreatedBy sets the "created_by" field.
func (u *ChannelMonitorUpsert) SetCreatedBy(v int64) *ChannelMonitorUpsert {
	u.Set(channelmonitor.FieldCreatedBy, v)
	return u
}

// UpdateCreatedBy sets the "created_by" field to the value that was provided on create.
func (u *ChannelMonitorUpsert) UpdateCreatedBy() *ChannelMonitorUpsert {
	u.SetExcluded(channelmonitor.FieldCreatedBy)
	return u
}

// AddCreatedBy adds v to the "created_by" field.
func (u *ChannelMonitorUpsert) AddCreatedBy(v int64) *ChannelMonitorUpsert {
	u.Add(channelmonitor.FieldCreatedBy, v)
	return u
}

// UpdateNewValues updates the mutable fields using the new values that were set on create.
// Using this option is equivalent to using:
//
//	client.ChannelMonitor.Create().
//		OnConflict(
//			sql.ResolveWithNewValues(),
//		).
//		Exec(ctx)
func (u *ChannelMonitorUpsertOne) UpdateNewValues() *ChannelMonitorUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
		// NOTE(review): immutable created_at is never overwritten on conflict.
		if _, exists := u.create.mutation.CreatedAt(); exists {
			s.SetIgnore(channelmonitor.FieldCreatedAt)
		}
	}))
	return u
}

// Ignore sets each column to itself in case of conflict.
// Using this option is equivalent to using:
//
//	client.ChannelMonitor.Create().
//		OnConflict(sql.ResolveWithIgnore()).
//		Exec(ctx)
func (u *ChannelMonitorUpsertOne) Ignore() *ChannelMonitorUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
	return u
}

// DoNothing configures the conflict_action to `DO NOTHING`.
// Supported only by SQLite and PostgreSQL.
func (u *ChannelMonitorUpsertOne) DoNothing() *ChannelMonitorUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.DoNothing())
	return u
}

// Update allows overriding fields `UPDATE` values. See the ChannelMonitorCreate.OnConflict
// documentation for more info.
// NOTE(review): UpsertOne setters are thin wrappers that defer to the
// corresponding ChannelMonitorUpsert setter via a ResolveWith callback.
func (u *ChannelMonitorUpsertOne) Update(set func(*ChannelMonitorUpsert)) *ChannelMonitorUpsertOne {
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
		set(&ChannelMonitorUpsert{UpdateSet: update})
	}))
	return u
}

// SetUpdatedAt sets the "updated_at" field.
func (u *ChannelMonitorUpsertOne) SetUpdatedAt(v time.Time) *ChannelMonitorUpsertOne {
	return u.Update(func(s *ChannelMonitorUpsert) {
		s.SetUpdatedAt(v)
	})
}

// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
func (u *ChannelMonitorUpsertOne) UpdateUpdatedAt() *ChannelMonitorUpsertOne {
	return u.Update(func(s *ChannelMonitorUpsert) {
		s.UpdateUpdatedAt()
	})
}

// SetName sets the "name" field.
func (u *ChannelMonitorUpsertOne) SetName(v string) *ChannelMonitorUpsertOne {
	return u.Update(func(s *ChannelMonitorUpsert) {
		s.SetName(v)
	})
}

// UpdateName sets the "name" field to the value that was provided on create.
func (u *ChannelMonitorUpsertOne) UpdateName() *ChannelMonitorUpsertOne {
	return u.Update(func(s *ChannelMonitorUpsert) {
		s.UpdateName()
	})
}

// SetProvider sets the "provider" field.
func (u *ChannelMonitorUpsertOne) SetProvider(v channelmonitor.Provider) *ChannelMonitorUpsertOne {
	return u.Update(func(s *ChannelMonitorUpsert) {
		s.SetProvider(v)
	})
}

// UpdateProvider sets the "provider" field to the value that was provided on create.
func (u *ChannelMonitorUpsertOne) UpdateProvider() *ChannelMonitorUpsertOne {
	return u.Update(func(s *ChannelMonitorUpsert) {
		s.UpdateProvider()
	})
}

// SetEndpoint sets the "endpoint" field.
func (u *ChannelMonitorUpsertOne) SetEndpoint(v string) *ChannelMonitorUpsertOne {
	return u.Update(func(s *ChannelMonitorUpsert) {
		s.SetEndpoint(v)
	})
}

// UpdateEndpoint sets the "endpoint" field to the value that was provided on create.
func (u *ChannelMonitorUpsertOne) UpdateEndpoint() *ChannelMonitorUpsertOne {
	return u.Update(func(s *ChannelMonitorUpsert) {
		s.UpdateEndpoint()
	})
}

// SetAPIKeyEncrypted sets the "api_key_encrypted" field.
func (u *ChannelMonitorUpsertOne) SetAPIKeyEncrypted(v string) *ChannelMonitorUpsertOne {
	return u.Update(func(s *ChannelMonitorUpsert) {
		s.SetAPIKeyEncrypted(v)
	})
}

// UpdateAPIKeyEncrypted sets the "api_key_encrypted" field to the value that was provided on create.
func (u *ChannelMonitorUpsertOne) UpdateAPIKeyEncrypted() *ChannelMonitorUpsertOne {
	return u.Update(func(s *ChannelMonitorUpsert) {
		s.UpdateAPIKeyEncrypted()
	})
}

// SetPrimaryModel sets the "primary_model" field.
func (u *ChannelMonitorUpsertOne) SetPrimaryModel(v string) *ChannelMonitorUpsertOne {
	return u.Update(func(s *ChannelMonitorUpsert) {
		s.SetPrimaryModel(v)
	})
}

// UpdatePrimaryModel sets the "primary_model" field to the value that was provided on create.
func (u *ChannelMonitorUpsertOne) UpdatePrimaryModel() *ChannelMonitorUpsertOne {
	return u.Update(func(s *ChannelMonitorUpsert) {
		s.UpdatePrimaryModel()
	})
}

// SetExtraModels sets the "extra_models" field.
func (u *ChannelMonitorUpsertOne) SetExtraModels(v []string) *ChannelMonitorUpsertOne {
	return u.Update(func(s *ChannelMonitorUpsert) {
		s.SetExtraModels(v)
	})
}

// UpdateExtraModels sets the "extra_models" field to the value that was provided on create.
func (u *ChannelMonitorUpsertOne) UpdateExtraModels() *ChannelMonitorUpsertOne {
	return u.Update(func(s *ChannelMonitorUpsert) {
		s.UpdateExtraModels()
	})
}

// SetGroupName sets the "group_name" field.
func (u *ChannelMonitorUpsertOne) SetGroupName(v string) *ChannelMonitorUpsertOne {
	return u.Update(func(s *ChannelMonitorUpsert) {
		s.SetGroupName(v)
	})
}

// UpdateGroupName sets the "group_name" field to the value that was provided on create.
func (u *ChannelMonitorUpsertOne) UpdateGroupName() *ChannelMonitorUpsertOne {
	return u.Update(func(s *ChannelMonitorUpsert) {
		s.UpdateGroupName()
	})
}

// ClearGroupName clears the value of the "group_name" field.
func (u *ChannelMonitorUpsertOne) ClearGroupName() *ChannelMonitorUpsertOne {
	return u.Update(func(s *ChannelMonitorUpsert) {
		s.ClearGroupName()
	})
}

// SetEnabled sets the "enabled" field.
func (u *ChannelMonitorUpsertOne) SetEnabled(v bool) *ChannelMonitorUpsertOne {
	return u.Update(func(s *ChannelMonitorUpsert) {
		s.SetEnabled(v)
	})
}

// UpdateEnabled sets the "enabled" field to the value that was provided on create.
func (u *ChannelMonitorUpsertOne) UpdateEnabled() *ChannelMonitorUpsertOne {
	return u.Update(func(s *ChannelMonitorUpsert) {
		s.UpdateEnabled()
	})
}

// SetIntervalSeconds sets the "interval_seconds" field.
func (u *ChannelMonitorUpsertOne) SetIntervalSeconds(v int) *ChannelMonitorUpsertOne {
	return u.Update(func(s *ChannelMonitorUpsert) {
		s.SetIntervalSeconds(v)
	})
}

// AddIntervalSeconds adds v to the "interval_seconds" field.
func (u *ChannelMonitorUpsertOne) AddIntervalSeconds(v int) *ChannelMonitorUpsertOne {
	return u.Update(func(s *ChannelMonitorUpsert) {
		s.AddIntervalSeconds(v)
	})
}

// UpdateIntervalSeconds sets the "interval_seconds" field to the value that was provided on create.
func (u *ChannelMonitorUpsertOne) UpdateIntervalSeconds() *ChannelMonitorUpsertOne {
	return u.Update(func(s *ChannelMonitorUpsert) {
		s.UpdateIntervalSeconds()
	})
}

// SetLastCheckedAt sets the "last_checked_at" field.
func (u *ChannelMonitorUpsertOne) SetLastCheckedAt(v time.Time) *ChannelMonitorUpsertOne {
	return u.Update(func(s *ChannelMonitorUpsert) {
		s.SetLastCheckedAt(v)
	})
}

// UpdateLastCheckedAt sets the "last_checked_at" field to the value that was provided on create.
func (u *ChannelMonitorUpsertOne) UpdateLastCheckedAt() *ChannelMonitorUpsertOne {
	return u.Update(func(s *ChannelMonitorUpsert) {
		s.UpdateLastCheckedAt()
	})
}

// ClearLastCheckedAt clears the value of the "last_checked_at" field.
func (u *ChannelMonitorUpsertOne) ClearLastCheckedAt() *ChannelMonitorUpsertOne {
	return u.Update(func(s *ChannelMonitorUpsert) {
		s.ClearLastCheckedAt()
	})
}

// SetCreatedBy sets the "created_by" field.
func (u *ChannelMonitorUpsertOne) SetCreatedBy(v int64) *ChannelMonitorUpsertOne {
	return u.Update(func(s *ChannelMonitorUpsert) {
		s.SetCreatedBy(v)
	})
}

// AddCreatedBy adds v to the "created_by" field.
func (u *ChannelMonitorUpsertOne) AddCreatedBy(v int64) *ChannelMonitorUpsertOne {
	return u.Update(func(s *ChannelMonitorUpsert) {
		s.AddCreatedBy(v)
	})
}

// UpdateCreatedBy sets the "created_by" field to the value that was provided on create.
func (u *ChannelMonitorUpsertOne) UpdateCreatedBy() *ChannelMonitorUpsertOne {
	return u.Update(func(s *ChannelMonitorUpsert) {
		s.UpdateCreatedBy()
	})
}

// Exec executes the query.
func (u *ChannelMonitorUpsertOne) Exec(ctx context.Context) error {
	if len(u.create.conflict) == 0 {
		return errors.New("ent: missing options for ChannelMonitorCreate.OnConflict")
	}
	return u.create.Exec(ctx)
}

// ExecX is like Exec, but panics if an error occurs.
func (u *ChannelMonitorUpsertOne) ExecX(ctx context.Context) {
	if err := u.create.Exec(ctx); err != nil {
		panic(err)
	}
}

// Exec executes the UPSERT query and returns the inserted/updated ID.
func (u *ChannelMonitorUpsertOne) ID(ctx context.Context) (id int64, err error) {
	node, err := u.create.Save(ctx)
	if err != nil {
		return id, err
	}
	return node.ID, nil
}

// IDX is like ID, but panics if an error occurs.
// NOTE(review): bulk save chains per-builder mutators so that all hooks run
// before a single sqlgraph.BatchCreate executes the INSERT; generated by ent.
func (u *ChannelMonitorUpsertOne) IDX(ctx context.Context) int64 {
	id, err := u.ID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}

// ChannelMonitorCreateBulk is the builder for creating many ChannelMonitor entities in bulk.
type ChannelMonitorCreateBulk struct {
	config
	err      error
	builders []*ChannelMonitorCreate
	conflict []sql.ConflictOption
}

// Save creates the ChannelMonitor entities in the database.
func (_c *ChannelMonitorCreateBulk) Save(ctx context.Context) ([]*ChannelMonitor, error) {
	if _c.err != nil {
		return nil, _c.err
	}
	specs := make([]*sqlgraph.CreateSpec, len(_c.builders))
	nodes := make([]*ChannelMonitor, len(_c.builders))
	mutators := make([]Mutator, len(_c.builders))
	for i := range _c.builders {
		func(i int, root context.Context) {
			builder := _c.builders[i]
			builder.defaults()
			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
				mutation, ok := m.(*ChannelMonitorMutation)
				if !ok {
					return nil, fmt.Errorf("unexpected mutation type %T", m)
				}
				if err := builder.check(); err != nil {
					return nil, err
				}
				builder.mutation = mutation
				var err error
				nodes[i], specs[i] = builder.createSpec()
				if i < len(mutators)-1 {
					_, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation)
				} else {
					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
					spec.OnConflict = _c.conflict
					// Invoke the actual operation on the latest mutation in the chain.
					if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil {
						if sqlgraph.IsConstraintError(err) {
							err = &ConstraintError{msg: err.Error(), wrap: err}
						}
					}
				}
				if err != nil {
					return nil, err
				}
				mutation.id = &nodes[i].ID
				if specs[i].ID.Value != nil {
					id := specs[i].ID.Value.(int64)
					nodes[i].ID = int64(id)
				}
				mutation.done = true
				return nodes[i], nil
			})
			for i := len(builder.hooks) - 1; i >= 0; i-- {
				mut = builder.hooks[i](mut)
			}
			mutators[i] = mut
		}(i, ctx)
	}
	if len(mutators) > 0 {
		if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}

// SaveX is like Save, but panics if an error occurs.
func (_c *ChannelMonitorCreateBulk) SaveX(ctx context.Context) []*ChannelMonitor {
	v, err := _c.Save(ctx)
	if err != nil {
		panic(err)
	}
	return v
}

// Exec executes the query.
func (_c *ChannelMonitorCreateBulk) Exec(ctx context.Context) error {
	_, err := _c.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (_c *ChannelMonitorCreateBulk) ExecX(ctx context.Context) {
	if err := _c.Exec(ctx); err != nil {
		panic(err)
	}
}

// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
// of the `INSERT` statement. For example:
//
//	client.ChannelMonitor.CreateBulk(builders...).
//		OnConflict(
//			// Update the row with the new values
//			// the was proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override some of the fields with custom
//		// update values.
//		Update(func(u *ent.ChannelMonitorUpsert) {
//			SetCreatedAt(v+v).
//		}).
//		Exec(ctx)
func (_c *ChannelMonitorCreateBulk) OnConflict(opts ...sql.ConflictOption) *ChannelMonitorUpsertBulk {
	_c.conflict = opts
	return &ChannelMonitorUpsertBulk{
		create: _c,
	}
}

// OnConflictColumns calls `OnConflict` and configures the columns
// as conflict target. Using this option is equivalent to using:
//
//	client.ChannelMonitor.Create().
//		OnConflict(sql.ConflictColumns(columns...)).
//		Exec(ctx)
func (_c *ChannelMonitorCreateBulk) OnConflictColumns(columns ...string) *ChannelMonitorUpsertBulk {
	_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
	return &ChannelMonitorUpsertBulk{
		create: _c,
	}
}

// ChannelMonitorUpsertBulk is the builder for "upsert"-ing
// a bulk of ChannelMonitor nodes.
type ChannelMonitorUpsertBulk struct {
	create *ChannelMonitorCreateBulk
}

// UpdateNewValues updates the mutable fields using the new values that
// were set on create. Using this option is equivalent to using:
//
//	client.ChannelMonitor.Create().
//		OnConflict(
//			sql.ResolveWithNewValues(),
//		).
//		Exec(ctx)
func (u *ChannelMonitorUpsertBulk) UpdateNewValues() *ChannelMonitorUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
		for _, b := range u.create.builders {
			if _, exists := b.mutation.CreatedAt(); exists {
				s.SetIgnore(channelmonitor.FieldCreatedAt)
			}
		}
	}))
	return u
}

// Ignore sets each column to itself in case of conflict.
// Using this option is equivalent to using:
//
//	client.ChannelMonitor.Create().
//		OnConflict(sql.ResolveWithIgnore()).
//		Exec(ctx)
func (u *ChannelMonitorUpsertBulk) Ignore() *ChannelMonitorUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
	return u
}

// DoNothing configures the conflict_action to `DO NOTHING`.
// Supported only by SQLite and PostgreSQL.
func (u *ChannelMonitorUpsertBulk) DoNothing() *ChannelMonitorUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.DoNothing())
	return u
}

// Update allows overriding fields `UPDATE` values. See the ChannelMonitorCreateBulk.OnConflict
// documentation for more info.
+func (u *ChannelMonitorUpsertBulk) Update(set func(*ChannelMonitorUpsert)) *ChannelMonitorUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&ChannelMonitorUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *ChannelMonitorUpsertBulk) SetUpdatedAt(v time.Time) *ChannelMonitorUpsertBulk { + return u.Update(func(s *ChannelMonitorUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *ChannelMonitorUpsertBulk) UpdateUpdatedAt() *ChannelMonitorUpsertBulk { + return u.Update(func(s *ChannelMonitorUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetName sets the "name" field. +func (u *ChannelMonitorUpsertBulk) SetName(v string) *ChannelMonitorUpsertBulk { + return u.Update(func(s *ChannelMonitorUpsert) { + s.SetName(v) + }) +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *ChannelMonitorUpsertBulk) UpdateName() *ChannelMonitorUpsertBulk { + return u.Update(func(s *ChannelMonitorUpsert) { + s.UpdateName() + }) +} + +// SetProvider sets the "provider" field. +func (u *ChannelMonitorUpsertBulk) SetProvider(v channelmonitor.Provider) *ChannelMonitorUpsertBulk { + return u.Update(func(s *ChannelMonitorUpsert) { + s.SetProvider(v) + }) +} + +// UpdateProvider sets the "provider" field to the value that was provided on create. +func (u *ChannelMonitorUpsertBulk) UpdateProvider() *ChannelMonitorUpsertBulk { + return u.Update(func(s *ChannelMonitorUpsert) { + s.UpdateProvider() + }) +} + +// SetEndpoint sets the "endpoint" field. +func (u *ChannelMonitorUpsertBulk) SetEndpoint(v string) *ChannelMonitorUpsertBulk { + return u.Update(func(s *ChannelMonitorUpsert) { + s.SetEndpoint(v) + }) +} + +// UpdateEndpoint sets the "endpoint" field to the value that was provided on create. 
+func (u *ChannelMonitorUpsertBulk) UpdateEndpoint() *ChannelMonitorUpsertBulk { + return u.Update(func(s *ChannelMonitorUpsert) { + s.UpdateEndpoint() + }) +} + +// SetAPIKeyEncrypted sets the "api_key_encrypted" field. +func (u *ChannelMonitorUpsertBulk) SetAPIKeyEncrypted(v string) *ChannelMonitorUpsertBulk { + return u.Update(func(s *ChannelMonitorUpsert) { + s.SetAPIKeyEncrypted(v) + }) +} + +// UpdateAPIKeyEncrypted sets the "api_key_encrypted" field to the value that was provided on create. +func (u *ChannelMonitorUpsertBulk) UpdateAPIKeyEncrypted() *ChannelMonitorUpsertBulk { + return u.Update(func(s *ChannelMonitorUpsert) { + s.UpdateAPIKeyEncrypted() + }) +} + +// SetPrimaryModel sets the "primary_model" field. +func (u *ChannelMonitorUpsertBulk) SetPrimaryModel(v string) *ChannelMonitorUpsertBulk { + return u.Update(func(s *ChannelMonitorUpsert) { + s.SetPrimaryModel(v) + }) +} + +// UpdatePrimaryModel sets the "primary_model" field to the value that was provided on create. +func (u *ChannelMonitorUpsertBulk) UpdatePrimaryModel() *ChannelMonitorUpsertBulk { + return u.Update(func(s *ChannelMonitorUpsert) { + s.UpdatePrimaryModel() + }) +} + +// SetExtraModels sets the "extra_models" field. +func (u *ChannelMonitorUpsertBulk) SetExtraModels(v []string) *ChannelMonitorUpsertBulk { + return u.Update(func(s *ChannelMonitorUpsert) { + s.SetExtraModels(v) + }) +} + +// UpdateExtraModels sets the "extra_models" field to the value that was provided on create. +func (u *ChannelMonitorUpsertBulk) UpdateExtraModels() *ChannelMonitorUpsertBulk { + return u.Update(func(s *ChannelMonitorUpsert) { + s.UpdateExtraModels() + }) +} + +// SetGroupName sets the "group_name" field. +func (u *ChannelMonitorUpsertBulk) SetGroupName(v string) *ChannelMonitorUpsertBulk { + return u.Update(func(s *ChannelMonitorUpsert) { + s.SetGroupName(v) + }) +} + +// UpdateGroupName sets the "group_name" field to the value that was provided on create. 
+func (u *ChannelMonitorUpsertBulk) UpdateGroupName() *ChannelMonitorUpsertBulk { + return u.Update(func(s *ChannelMonitorUpsert) { + s.UpdateGroupName() + }) +} + +// ClearGroupName clears the value of the "group_name" field. +func (u *ChannelMonitorUpsertBulk) ClearGroupName() *ChannelMonitorUpsertBulk { + return u.Update(func(s *ChannelMonitorUpsert) { + s.ClearGroupName() + }) +} + +// SetEnabled sets the "enabled" field. +func (u *ChannelMonitorUpsertBulk) SetEnabled(v bool) *ChannelMonitorUpsertBulk { + return u.Update(func(s *ChannelMonitorUpsert) { + s.SetEnabled(v) + }) +} + +// UpdateEnabled sets the "enabled" field to the value that was provided on create. +func (u *ChannelMonitorUpsertBulk) UpdateEnabled() *ChannelMonitorUpsertBulk { + return u.Update(func(s *ChannelMonitorUpsert) { + s.UpdateEnabled() + }) +} + +// SetIntervalSeconds sets the "interval_seconds" field. +func (u *ChannelMonitorUpsertBulk) SetIntervalSeconds(v int) *ChannelMonitorUpsertBulk { + return u.Update(func(s *ChannelMonitorUpsert) { + s.SetIntervalSeconds(v) + }) +} + +// AddIntervalSeconds adds v to the "interval_seconds" field. +func (u *ChannelMonitorUpsertBulk) AddIntervalSeconds(v int) *ChannelMonitorUpsertBulk { + return u.Update(func(s *ChannelMonitorUpsert) { + s.AddIntervalSeconds(v) + }) +} + +// UpdateIntervalSeconds sets the "interval_seconds" field to the value that was provided on create. +func (u *ChannelMonitorUpsertBulk) UpdateIntervalSeconds() *ChannelMonitorUpsertBulk { + return u.Update(func(s *ChannelMonitorUpsert) { + s.UpdateIntervalSeconds() + }) +} + +// SetLastCheckedAt sets the "last_checked_at" field. +func (u *ChannelMonitorUpsertBulk) SetLastCheckedAt(v time.Time) *ChannelMonitorUpsertBulk { + return u.Update(func(s *ChannelMonitorUpsert) { + s.SetLastCheckedAt(v) + }) +} + +// UpdateLastCheckedAt sets the "last_checked_at" field to the value that was provided on create. 
+func (u *ChannelMonitorUpsertBulk) UpdateLastCheckedAt() *ChannelMonitorUpsertBulk { + return u.Update(func(s *ChannelMonitorUpsert) { + s.UpdateLastCheckedAt() + }) +} + +// ClearLastCheckedAt clears the value of the "last_checked_at" field. +func (u *ChannelMonitorUpsertBulk) ClearLastCheckedAt() *ChannelMonitorUpsertBulk { + return u.Update(func(s *ChannelMonitorUpsert) { + s.ClearLastCheckedAt() + }) +} + +// SetCreatedBy sets the "created_by" field. +func (u *ChannelMonitorUpsertBulk) SetCreatedBy(v int64) *ChannelMonitorUpsertBulk { + return u.Update(func(s *ChannelMonitorUpsert) { + s.SetCreatedBy(v) + }) +} + +// AddCreatedBy adds v to the "created_by" field. +func (u *ChannelMonitorUpsertBulk) AddCreatedBy(v int64) *ChannelMonitorUpsertBulk { + return u.Update(func(s *ChannelMonitorUpsert) { + s.AddCreatedBy(v) + }) +} + +// UpdateCreatedBy sets the "created_by" field to the value that was provided on create. +func (u *ChannelMonitorUpsertBulk) UpdateCreatedBy() *ChannelMonitorUpsertBulk { + return u.Update(func(s *ChannelMonitorUpsert) { + s.UpdateCreatedBy() + }) +} + +// Exec executes the query. +func (u *ChannelMonitorUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the ChannelMonitorCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for ChannelMonitorCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (u *ChannelMonitorUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/channelmonitor_delete.go b/backend/ent/channelmonitor_delete.go new file mode 100644 index 00000000..500dbb48 --- /dev/null +++ b/backend/ent/channelmonitor_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/channelmonitor" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ChannelMonitorDelete is the builder for deleting a ChannelMonitor entity. +type ChannelMonitorDelete struct { + config + hooks []Hook + mutation *ChannelMonitorMutation +} + +// Where appends a list predicates to the ChannelMonitorDelete builder. +func (_d *ChannelMonitorDelete) Where(ps ...predicate.ChannelMonitor) *ChannelMonitorDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *ChannelMonitorDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (_d *ChannelMonitorDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *ChannelMonitorDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(channelmonitor.Table, sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// ChannelMonitorDeleteOne is the builder for deleting a single ChannelMonitor entity. +type ChannelMonitorDeleteOne struct { + _d *ChannelMonitorDelete +} + +// Where appends a list predicates to the ChannelMonitorDelete builder. +func (_d *ChannelMonitorDeleteOne) Where(ps ...predicate.ChannelMonitor) *ChannelMonitorDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *ChannelMonitorDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{channelmonitor.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *ChannelMonitorDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/channelmonitor_query.go b/backend/ent/channelmonitor_query.go new file mode 100644 index 00000000..6a532587 --- /dev/null +++ b/backend/ent/channelmonitor_query.go @@ -0,0 +1,643 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/channelmonitor" + "github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ChannelMonitorQuery is the builder for querying ChannelMonitor entities. +type ChannelMonitorQuery struct { + config + ctx *QueryContext + order []channelmonitor.OrderOption + inters []Interceptor + predicates []predicate.ChannelMonitor + withHistory *ChannelMonitorHistoryQuery + modifiers []func(*sql.Selector) + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the ChannelMonitorQuery builder. +func (_q *ChannelMonitorQuery) Where(ps ...predicate.ChannelMonitor) *ChannelMonitorQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *ChannelMonitorQuery) Limit(limit int) *ChannelMonitorQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *ChannelMonitorQuery) Offset(offset int) *ChannelMonitorQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *ChannelMonitorQuery) Unique(unique bool) *ChannelMonitorQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *ChannelMonitorQuery) Order(o ...channelmonitor.OrderOption) *ChannelMonitorQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryHistory chains the current query on the "history" edge. 
+func (_q *ChannelMonitorQuery) QueryHistory() *ChannelMonitorHistoryQuery { + query := (&ChannelMonitorHistoryClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(channelmonitor.Table, channelmonitor.FieldID, selector), + sqlgraph.To(channelmonitorhistory.Table, channelmonitorhistory.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, channelmonitor.HistoryTable, channelmonitor.HistoryColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first ChannelMonitor entity from the query. +// Returns a *NotFoundError when no ChannelMonitor was found. +func (_q *ChannelMonitorQuery) First(ctx context.Context) (*ChannelMonitor, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{channelmonitor.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *ChannelMonitorQuery) FirstX(ctx context.Context) *ChannelMonitor { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first ChannelMonitor ID from the query. +// Returns a *NotFoundError when no ChannelMonitor ID was found. +func (_q *ChannelMonitorQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{channelmonitor.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. 
+func (_q *ChannelMonitorQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single ChannelMonitor entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one ChannelMonitor entity is found. +// Returns a *NotFoundError when no ChannelMonitor entities are found. +func (_q *ChannelMonitorQuery) Only(ctx context.Context) (*ChannelMonitor, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{channelmonitor.Label} + default: + return nil, &NotSingularError{channelmonitor.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *ChannelMonitorQuery) OnlyX(ctx context.Context) *ChannelMonitor { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only ChannelMonitor ID in the query. +// Returns a *NotSingularError when more than one ChannelMonitor ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *ChannelMonitorQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{channelmonitor.Label} + default: + err = &NotSingularError{channelmonitor.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *ChannelMonitorQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of ChannelMonitors. 
+func (_q *ChannelMonitorQuery) All(ctx context.Context) ([]*ChannelMonitor, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*ChannelMonitor, *ChannelMonitorQuery]() + return withInterceptors[[]*ChannelMonitor](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *ChannelMonitorQuery) AllX(ctx context.Context) []*ChannelMonitor { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of ChannelMonitor IDs. +func (_q *ChannelMonitorQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(channelmonitor.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *ChannelMonitorQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (_q *ChannelMonitorQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*ChannelMonitorQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *ChannelMonitorQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. 
+func (_q *ChannelMonitorQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *ChannelMonitorQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the ChannelMonitorQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *ChannelMonitorQuery) Clone() *ChannelMonitorQuery { + if _q == nil { + return nil + } + return &ChannelMonitorQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]channelmonitor.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.ChannelMonitor{}, _q.predicates...), + withHistory: _q.withHistory.Clone(), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// WithHistory tells the query-builder to eager-load the nodes that are connected to +// the "history" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *ChannelMonitorQuery) WithHistory(opts ...func(*ChannelMonitorHistoryQuery)) *ChannelMonitorQuery { + query := (&ChannelMonitorHistoryClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withHistory = query + return _q +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.ChannelMonitor.Query(). 
+// GroupBy(channelmonitor.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *ChannelMonitorQuery) GroupBy(field string, fields ...string) *ChannelMonitorGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &ChannelMonitorGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = channelmonitor.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.ChannelMonitor.Query(). +// Select(channelmonitor.FieldCreatedAt). +// Scan(ctx, &v) +func (_q *ChannelMonitorQuery) Select(fields ...string) *ChannelMonitorSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &ChannelMonitorSelect{ChannelMonitorQuery: _q} + sbuild.label = channelmonitor.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a ChannelMonitorSelect configured with the given aggregations. +func (_q *ChannelMonitorQuery) Aggregate(fns ...AggregateFunc) *ChannelMonitorSelect { + return _q.Select().Aggregate(fns...) 
+} + +func (_q *ChannelMonitorQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !channelmonitor.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *ChannelMonitorQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*ChannelMonitor, error) { + var ( + nodes = []*ChannelMonitor{} + _spec = _q.querySpec() + loadedTypes = [1]bool{ + _q.withHistory != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*ChannelMonitor).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &ChannelMonitor{config: _q.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := _q.withHistory; query != nil { + if err := _q.loadHistory(ctx, query, nodes, + func(n *ChannelMonitor) { n.Edges.History = []*ChannelMonitorHistory{} }, + func(n *ChannelMonitor, e *ChannelMonitorHistory) { n.Edges.History = append(n.Edges.History, e) }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (_q *ChannelMonitorQuery) loadHistory(ctx context.Context, query *ChannelMonitorHistoryQuery, nodes []*ChannelMonitor, init func(*ChannelMonitor), assign func(*ChannelMonitor, 
*ChannelMonitorHistory)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*ChannelMonitor) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(channelmonitorhistory.FieldMonitorID) + } + query.Where(predicate.ChannelMonitorHistory(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(channelmonitor.HistoryColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.MonitorID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "monitor_id" returned %v for node %v`, fk, n.ID) + } + assign(node, n) + } + return nil +} + +func (_q *ChannelMonitorQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *ChannelMonitorQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(channelmonitor.Table, channelmonitor.Columns, sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, channelmonitor.FieldID) + for i := range fields { + if fields[i] != channelmonitor.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := 
_q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *ChannelMonitorQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(channelmonitor.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = channelmonitor.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, m := range _q.modifiers { + m(selector) + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (_q *ChannelMonitorQuery) ForUpdate(opts ...sql.LockOption) *ChannelMonitorQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. 
+func (_q *ChannelMonitorQuery) ForShare(opts ...sql.LockOption) *ChannelMonitorQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + +// ChannelMonitorGroupBy is the group-by builder for ChannelMonitor entities. +type ChannelMonitorGroupBy struct { + selector + build *ChannelMonitorQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *ChannelMonitorGroupBy) Aggregate(fns ...AggregateFunc) *ChannelMonitorGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. +func (_g *ChannelMonitorGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*ChannelMonitorQuery, *ChannelMonitorGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *ChannelMonitorGroupBy) sqlScan(ctx context.Context, root *ChannelMonitorQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// ChannelMonitorSelect is the builder for selecting fields of ChannelMonitor entities. 
+type ChannelMonitorSelect struct { + *ChannelMonitorQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *ChannelMonitorSelect) Aggregate(fns ...AggregateFunc) *ChannelMonitorSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. +func (_s *ChannelMonitorSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*ChannelMonitorQuery, *ChannelMonitorSelect](ctx, _s.ChannelMonitorQuery, _s, _s.inters, v) +} + +func (_s *ChannelMonitorSelect) sqlScan(ctx context.Context, root *ChannelMonitorQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/channelmonitor_update.go b/backend/ent/channelmonitor_update.go new file mode 100644 index 00000000..df575a9f --- /dev/null +++ b/backend/ent/channelmonitor_update.go @@ -0,0 +1,918 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/dialect/sql/sqljson" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/channelmonitor" + "github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ChannelMonitorUpdate is the builder for updating ChannelMonitor entities. +type ChannelMonitorUpdate struct { + config + hooks []Hook + mutation *ChannelMonitorMutation +} + +// Where appends a list predicates to the ChannelMonitorUpdate builder. +func (_u *ChannelMonitorUpdate) Where(ps ...predicate.ChannelMonitor) *ChannelMonitorUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *ChannelMonitorUpdate) SetUpdatedAt(v time.Time) *ChannelMonitorUpdate { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetName sets the "name" field. +func (_u *ChannelMonitorUpdate) SetName(v string) *ChannelMonitorUpdate { + _u.mutation.SetName(v) + return _u +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (_u *ChannelMonitorUpdate) SetNillableName(v *string) *ChannelMonitorUpdate { + if v != nil { + _u.SetName(*v) + } + return _u +} + +// SetProvider sets the "provider" field. +func (_u *ChannelMonitorUpdate) SetProvider(v channelmonitor.Provider) *ChannelMonitorUpdate { + _u.mutation.SetProvider(v) + return _u +} + +// SetNillableProvider sets the "provider" field if the given value is not nil. +func (_u *ChannelMonitorUpdate) SetNillableProvider(v *channelmonitor.Provider) *ChannelMonitorUpdate { + if v != nil { + _u.SetProvider(*v) + } + return _u +} + +// SetEndpoint sets the "endpoint" field. +func (_u *ChannelMonitorUpdate) SetEndpoint(v string) *ChannelMonitorUpdate { + _u.mutation.SetEndpoint(v) + return _u +} + +// SetNillableEndpoint sets the "endpoint" field if the given value is not nil. 
+func (_u *ChannelMonitorUpdate) SetNillableEndpoint(v *string) *ChannelMonitorUpdate { + if v != nil { + _u.SetEndpoint(*v) + } + return _u +} + +// SetAPIKeyEncrypted sets the "api_key_encrypted" field. +func (_u *ChannelMonitorUpdate) SetAPIKeyEncrypted(v string) *ChannelMonitorUpdate { + _u.mutation.SetAPIKeyEncrypted(v) + return _u +} + +// SetNillableAPIKeyEncrypted sets the "api_key_encrypted" field if the given value is not nil. +func (_u *ChannelMonitorUpdate) SetNillableAPIKeyEncrypted(v *string) *ChannelMonitorUpdate { + if v != nil { + _u.SetAPIKeyEncrypted(*v) + } + return _u +} + +// SetPrimaryModel sets the "primary_model" field. +func (_u *ChannelMonitorUpdate) SetPrimaryModel(v string) *ChannelMonitorUpdate { + _u.mutation.SetPrimaryModel(v) + return _u +} + +// SetNillablePrimaryModel sets the "primary_model" field if the given value is not nil. +func (_u *ChannelMonitorUpdate) SetNillablePrimaryModel(v *string) *ChannelMonitorUpdate { + if v != nil { + _u.SetPrimaryModel(*v) + } + return _u +} + +// SetExtraModels sets the "extra_models" field. +func (_u *ChannelMonitorUpdate) SetExtraModels(v []string) *ChannelMonitorUpdate { + _u.mutation.SetExtraModels(v) + return _u +} + +// AppendExtraModels appends value to the "extra_models" field. +func (_u *ChannelMonitorUpdate) AppendExtraModels(v []string) *ChannelMonitorUpdate { + _u.mutation.AppendExtraModels(v) + return _u +} + +// SetGroupName sets the "group_name" field. +func (_u *ChannelMonitorUpdate) SetGroupName(v string) *ChannelMonitorUpdate { + _u.mutation.SetGroupName(v) + return _u +} + +// SetNillableGroupName sets the "group_name" field if the given value is not nil. +func (_u *ChannelMonitorUpdate) SetNillableGroupName(v *string) *ChannelMonitorUpdate { + if v != nil { + _u.SetGroupName(*v) + } + return _u +} + +// ClearGroupName clears the value of the "group_name" field. 
+func (_u *ChannelMonitorUpdate) ClearGroupName() *ChannelMonitorUpdate { + _u.mutation.ClearGroupName() + return _u +} + +// SetEnabled sets the "enabled" field. +func (_u *ChannelMonitorUpdate) SetEnabled(v bool) *ChannelMonitorUpdate { + _u.mutation.SetEnabled(v) + return _u +} + +// SetNillableEnabled sets the "enabled" field if the given value is not nil. +func (_u *ChannelMonitorUpdate) SetNillableEnabled(v *bool) *ChannelMonitorUpdate { + if v != nil { + _u.SetEnabled(*v) + } + return _u +} + +// SetIntervalSeconds sets the "interval_seconds" field. +func (_u *ChannelMonitorUpdate) SetIntervalSeconds(v int) *ChannelMonitorUpdate { + _u.mutation.ResetIntervalSeconds() + _u.mutation.SetIntervalSeconds(v) + return _u +} + +// SetNillableIntervalSeconds sets the "interval_seconds" field if the given value is not nil. +func (_u *ChannelMonitorUpdate) SetNillableIntervalSeconds(v *int) *ChannelMonitorUpdate { + if v != nil { + _u.SetIntervalSeconds(*v) + } + return _u +} + +// AddIntervalSeconds adds value to the "interval_seconds" field. +func (_u *ChannelMonitorUpdate) AddIntervalSeconds(v int) *ChannelMonitorUpdate { + _u.mutation.AddIntervalSeconds(v) + return _u +} + +// SetLastCheckedAt sets the "last_checked_at" field. +func (_u *ChannelMonitorUpdate) SetLastCheckedAt(v time.Time) *ChannelMonitorUpdate { + _u.mutation.SetLastCheckedAt(v) + return _u +} + +// SetNillableLastCheckedAt sets the "last_checked_at" field if the given value is not nil. +func (_u *ChannelMonitorUpdate) SetNillableLastCheckedAt(v *time.Time) *ChannelMonitorUpdate { + if v != nil { + _u.SetLastCheckedAt(*v) + } + return _u +} + +// ClearLastCheckedAt clears the value of the "last_checked_at" field. +func (_u *ChannelMonitorUpdate) ClearLastCheckedAt() *ChannelMonitorUpdate { + _u.mutation.ClearLastCheckedAt() + return _u +} + +// SetCreatedBy sets the "created_by" field. 
+func (_u *ChannelMonitorUpdate) SetCreatedBy(v int64) *ChannelMonitorUpdate { + _u.mutation.ResetCreatedBy() + _u.mutation.SetCreatedBy(v) + return _u +} + +// SetNillableCreatedBy sets the "created_by" field if the given value is not nil. +func (_u *ChannelMonitorUpdate) SetNillableCreatedBy(v *int64) *ChannelMonitorUpdate { + if v != nil { + _u.SetCreatedBy(*v) + } + return _u +} + +// AddCreatedBy adds value to the "created_by" field. +func (_u *ChannelMonitorUpdate) AddCreatedBy(v int64) *ChannelMonitorUpdate { + _u.mutation.AddCreatedBy(v) + return _u +} + +// AddHistoryIDs adds the "history" edge to the ChannelMonitorHistory entity by IDs. +func (_u *ChannelMonitorUpdate) AddHistoryIDs(ids ...int64) *ChannelMonitorUpdate { + _u.mutation.AddHistoryIDs(ids...) + return _u +} + +// AddHistory adds the "history" edges to the ChannelMonitorHistory entity. +func (_u *ChannelMonitorUpdate) AddHistory(v ...*ChannelMonitorHistory) *ChannelMonitorUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddHistoryIDs(ids...) +} + +// Mutation returns the ChannelMonitorMutation object of the builder. +func (_u *ChannelMonitorUpdate) Mutation() *ChannelMonitorMutation { + return _u.mutation +} + +// ClearHistory clears all "history" edges to the ChannelMonitorHistory entity. +func (_u *ChannelMonitorUpdate) ClearHistory() *ChannelMonitorUpdate { + _u.mutation.ClearHistory() + return _u +} + +// RemoveHistoryIDs removes the "history" edge to ChannelMonitorHistory entities by IDs. +func (_u *ChannelMonitorUpdate) RemoveHistoryIDs(ids ...int64) *ChannelMonitorUpdate { + _u.mutation.RemoveHistoryIDs(ids...) + return _u +} + +// RemoveHistory removes "history" edges to ChannelMonitorHistory entities. +func (_u *ChannelMonitorUpdate) RemoveHistory(v ...*ChannelMonitorHistory) *ChannelMonitorUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveHistoryIDs(ids...) 
+} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *ChannelMonitorUpdate) Save(ctx context.Context) (int, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *ChannelMonitorUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *ChannelMonitorUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *ChannelMonitorUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *ChannelMonitorUpdate) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := channelmonitor.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_u *ChannelMonitorUpdate) check() error { + if v, ok := _u.mutation.Name(); ok { + if err := channelmonitor.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitor.name": %w`, err)} + } + } + if v, ok := _u.mutation.Provider(); ok { + if err := channelmonitor.ProviderValidator(v); err != nil { + return &ValidationError{Name: "provider", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitor.provider": %w`, err)} + } + } + if v, ok := _u.mutation.Endpoint(); ok { + if err := channelmonitor.EndpointValidator(v); err != nil { + return &ValidationError{Name: "endpoint", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitor.endpoint": %w`, err)} + } + } + if v, ok := _u.mutation.APIKeyEncrypted(); ok { + if err := channelmonitor.APIKeyEncryptedValidator(v); err != nil { + return &ValidationError{Name: "api_key_encrypted", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitor.api_key_encrypted": %w`, err)} + } + } + if v, ok := _u.mutation.PrimaryModel(); ok { + if err := channelmonitor.PrimaryModelValidator(v); err != nil { + return &ValidationError{Name: "primary_model", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitor.primary_model": %w`, err)} + } + } + if v, ok := _u.mutation.GroupName(); ok { + if err := channelmonitor.GroupNameValidator(v); err != nil { + return &ValidationError{Name: "group_name", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitor.group_name": %w`, err)} + } + } + if v, ok := _u.mutation.IntervalSeconds(); ok { + if err := channelmonitor.IntervalSecondsValidator(v); err != nil { + return &ValidationError{Name: "interval_seconds", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitor.interval_seconds": %w`, err)} + } + } + return nil +} + +func (_u *ChannelMonitorUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + 
_spec := sqlgraph.NewUpdateSpec(channelmonitor.Table, channelmonitor.Columns, sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(channelmonitor.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.Name(); ok { + _spec.SetField(channelmonitor.FieldName, field.TypeString, value) + } + if value, ok := _u.mutation.Provider(); ok { + _spec.SetField(channelmonitor.FieldProvider, field.TypeEnum, value) + } + if value, ok := _u.mutation.Endpoint(); ok { + _spec.SetField(channelmonitor.FieldEndpoint, field.TypeString, value) + } + if value, ok := _u.mutation.APIKeyEncrypted(); ok { + _spec.SetField(channelmonitor.FieldAPIKeyEncrypted, field.TypeString, value) + } + if value, ok := _u.mutation.PrimaryModel(); ok { + _spec.SetField(channelmonitor.FieldPrimaryModel, field.TypeString, value) + } + if value, ok := _u.mutation.ExtraModels(); ok { + _spec.SetField(channelmonitor.FieldExtraModels, field.TypeJSON, value) + } + if value, ok := _u.mutation.AppendedExtraModels(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, channelmonitor.FieldExtraModels, value) + }) + } + if value, ok := _u.mutation.GroupName(); ok { + _spec.SetField(channelmonitor.FieldGroupName, field.TypeString, value) + } + if _u.mutation.GroupNameCleared() { + _spec.ClearField(channelmonitor.FieldGroupName, field.TypeString) + } + if value, ok := _u.mutation.Enabled(); ok { + _spec.SetField(channelmonitor.FieldEnabled, field.TypeBool, value) + } + if value, ok := _u.mutation.IntervalSeconds(); ok { + _spec.SetField(channelmonitor.FieldIntervalSeconds, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedIntervalSeconds(); ok { + _spec.AddField(channelmonitor.FieldIntervalSeconds, field.TypeInt, value) + } + if value, ok := 
_u.mutation.LastCheckedAt(); ok { + _spec.SetField(channelmonitor.FieldLastCheckedAt, field.TypeTime, value) + } + if _u.mutation.LastCheckedAtCleared() { + _spec.ClearField(channelmonitor.FieldLastCheckedAt, field.TypeTime) + } + if value, ok := _u.mutation.CreatedBy(); ok { + _spec.SetField(channelmonitor.FieldCreatedBy, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedCreatedBy(); ok { + _spec.AddField(channelmonitor.FieldCreatedBy, field.TypeInt64, value) + } + if _u.mutation.HistoryCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: channelmonitor.HistoryTable, + Columns: []string{channelmonitor.HistoryColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(channelmonitorhistory.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedHistoryIDs(); len(nodes) > 0 && !_u.mutation.HistoryCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: channelmonitor.HistoryTable, + Columns: []string{channelmonitor.HistoryColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(channelmonitorhistory.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.HistoryIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: channelmonitor.HistoryTable, + Columns: []string{channelmonitor.HistoryColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(channelmonitorhistory.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := 
err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{channelmonitor.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// ChannelMonitorUpdateOne is the builder for updating a single ChannelMonitor entity. +type ChannelMonitorUpdateOne struct { + config + fields []string + hooks []Hook + mutation *ChannelMonitorMutation +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *ChannelMonitorUpdateOne) SetUpdatedAt(v time.Time) *ChannelMonitorUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetName sets the "name" field. +func (_u *ChannelMonitorUpdateOne) SetName(v string) *ChannelMonitorUpdateOne { + _u.mutation.SetName(v) + return _u +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (_u *ChannelMonitorUpdateOne) SetNillableName(v *string) *ChannelMonitorUpdateOne { + if v != nil { + _u.SetName(*v) + } + return _u +} + +// SetProvider sets the "provider" field. +func (_u *ChannelMonitorUpdateOne) SetProvider(v channelmonitor.Provider) *ChannelMonitorUpdateOne { + _u.mutation.SetProvider(v) + return _u +} + +// SetNillableProvider sets the "provider" field if the given value is not nil. +func (_u *ChannelMonitorUpdateOne) SetNillableProvider(v *channelmonitor.Provider) *ChannelMonitorUpdateOne { + if v != nil { + _u.SetProvider(*v) + } + return _u +} + +// SetEndpoint sets the "endpoint" field. +func (_u *ChannelMonitorUpdateOne) SetEndpoint(v string) *ChannelMonitorUpdateOne { + _u.mutation.SetEndpoint(v) + return _u +} + +// SetNillableEndpoint sets the "endpoint" field if the given value is not nil. +func (_u *ChannelMonitorUpdateOne) SetNillableEndpoint(v *string) *ChannelMonitorUpdateOne { + if v != nil { + _u.SetEndpoint(*v) + } + return _u +} + +// SetAPIKeyEncrypted sets the "api_key_encrypted" field. 
+func (_u *ChannelMonitorUpdateOne) SetAPIKeyEncrypted(v string) *ChannelMonitorUpdateOne { + _u.mutation.SetAPIKeyEncrypted(v) + return _u +} + +// SetNillableAPIKeyEncrypted sets the "api_key_encrypted" field if the given value is not nil. +func (_u *ChannelMonitorUpdateOne) SetNillableAPIKeyEncrypted(v *string) *ChannelMonitorUpdateOne { + if v != nil { + _u.SetAPIKeyEncrypted(*v) + } + return _u +} + +// SetPrimaryModel sets the "primary_model" field. +func (_u *ChannelMonitorUpdateOne) SetPrimaryModel(v string) *ChannelMonitorUpdateOne { + _u.mutation.SetPrimaryModel(v) + return _u +} + +// SetNillablePrimaryModel sets the "primary_model" field if the given value is not nil. +func (_u *ChannelMonitorUpdateOne) SetNillablePrimaryModel(v *string) *ChannelMonitorUpdateOne { + if v != nil { + _u.SetPrimaryModel(*v) + } + return _u +} + +// SetExtraModels sets the "extra_models" field. +func (_u *ChannelMonitorUpdateOne) SetExtraModels(v []string) *ChannelMonitorUpdateOne { + _u.mutation.SetExtraModels(v) + return _u +} + +// AppendExtraModels appends value to the "extra_models" field. +func (_u *ChannelMonitorUpdateOne) AppendExtraModels(v []string) *ChannelMonitorUpdateOne { + _u.mutation.AppendExtraModels(v) + return _u +} + +// SetGroupName sets the "group_name" field. +func (_u *ChannelMonitorUpdateOne) SetGroupName(v string) *ChannelMonitorUpdateOne { + _u.mutation.SetGroupName(v) + return _u +} + +// SetNillableGroupName sets the "group_name" field if the given value is not nil. +func (_u *ChannelMonitorUpdateOne) SetNillableGroupName(v *string) *ChannelMonitorUpdateOne { + if v != nil { + _u.SetGroupName(*v) + } + return _u +} + +// ClearGroupName clears the value of the "group_name" field. +func (_u *ChannelMonitorUpdateOne) ClearGroupName() *ChannelMonitorUpdateOne { + _u.mutation.ClearGroupName() + return _u +} + +// SetEnabled sets the "enabled" field. 
+func (_u *ChannelMonitorUpdateOne) SetEnabled(v bool) *ChannelMonitorUpdateOne { + _u.mutation.SetEnabled(v) + return _u +} + +// SetNillableEnabled sets the "enabled" field if the given value is not nil. +func (_u *ChannelMonitorUpdateOne) SetNillableEnabled(v *bool) *ChannelMonitorUpdateOne { + if v != nil { + _u.SetEnabled(*v) + } + return _u +} + +// SetIntervalSeconds sets the "interval_seconds" field. +func (_u *ChannelMonitorUpdateOne) SetIntervalSeconds(v int) *ChannelMonitorUpdateOne { + _u.mutation.ResetIntervalSeconds() + _u.mutation.SetIntervalSeconds(v) + return _u +} + +// SetNillableIntervalSeconds sets the "interval_seconds" field if the given value is not nil. +func (_u *ChannelMonitorUpdateOne) SetNillableIntervalSeconds(v *int) *ChannelMonitorUpdateOne { + if v != nil { + _u.SetIntervalSeconds(*v) + } + return _u +} + +// AddIntervalSeconds adds value to the "interval_seconds" field. +func (_u *ChannelMonitorUpdateOne) AddIntervalSeconds(v int) *ChannelMonitorUpdateOne { + _u.mutation.AddIntervalSeconds(v) + return _u +} + +// SetLastCheckedAt sets the "last_checked_at" field. +func (_u *ChannelMonitorUpdateOne) SetLastCheckedAt(v time.Time) *ChannelMonitorUpdateOne { + _u.mutation.SetLastCheckedAt(v) + return _u +} + +// SetNillableLastCheckedAt sets the "last_checked_at" field if the given value is not nil. +func (_u *ChannelMonitorUpdateOne) SetNillableLastCheckedAt(v *time.Time) *ChannelMonitorUpdateOne { + if v != nil { + _u.SetLastCheckedAt(*v) + } + return _u +} + +// ClearLastCheckedAt clears the value of the "last_checked_at" field. +func (_u *ChannelMonitorUpdateOne) ClearLastCheckedAt() *ChannelMonitorUpdateOne { + _u.mutation.ClearLastCheckedAt() + return _u +} + +// SetCreatedBy sets the "created_by" field. 
+func (_u *ChannelMonitorUpdateOne) SetCreatedBy(v int64) *ChannelMonitorUpdateOne { + _u.mutation.ResetCreatedBy() + _u.mutation.SetCreatedBy(v) + return _u +} + +// SetNillableCreatedBy sets the "created_by" field if the given value is not nil. +func (_u *ChannelMonitorUpdateOne) SetNillableCreatedBy(v *int64) *ChannelMonitorUpdateOne { + if v != nil { + _u.SetCreatedBy(*v) + } + return _u +} + +// AddCreatedBy adds value to the "created_by" field. +func (_u *ChannelMonitorUpdateOne) AddCreatedBy(v int64) *ChannelMonitorUpdateOne { + _u.mutation.AddCreatedBy(v) + return _u +} + +// AddHistoryIDs adds the "history" edge to the ChannelMonitorHistory entity by IDs. +func (_u *ChannelMonitorUpdateOne) AddHistoryIDs(ids ...int64) *ChannelMonitorUpdateOne { + _u.mutation.AddHistoryIDs(ids...) + return _u +} + +// AddHistory adds the "history" edges to the ChannelMonitorHistory entity. +func (_u *ChannelMonitorUpdateOne) AddHistory(v ...*ChannelMonitorHistory) *ChannelMonitorUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddHistoryIDs(ids...) +} + +// Mutation returns the ChannelMonitorMutation object of the builder. +func (_u *ChannelMonitorUpdateOne) Mutation() *ChannelMonitorMutation { + return _u.mutation +} + +// ClearHistory clears all "history" edges to the ChannelMonitorHistory entity. +func (_u *ChannelMonitorUpdateOne) ClearHistory() *ChannelMonitorUpdateOne { + _u.mutation.ClearHistory() + return _u +} + +// RemoveHistoryIDs removes the "history" edge to ChannelMonitorHistory entities by IDs. +func (_u *ChannelMonitorUpdateOne) RemoveHistoryIDs(ids ...int64) *ChannelMonitorUpdateOne { + _u.mutation.RemoveHistoryIDs(ids...) + return _u +} + +// RemoveHistory removes "history" edges to ChannelMonitorHistory entities. 
+func (_u *ChannelMonitorUpdateOne) RemoveHistory(v ...*ChannelMonitorHistory) *ChannelMonitorUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveHistoryIDs(ids...) +} + +// Where appends a list predicates to the ChannelMonitorUpdate builder. +func (_u *ChannelMonitorUpdateOne) Where(ps ...predicate.ChannelMonitor) *ChannelMonitorUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *ChannelMonitorUpdateOne) Select(field string, fields ...string) *ChannelMonitorUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated ChannelMonitor entity. +func (_u *ChannelMonitorUpdateOne) Save(ctx context.Context) (*ChannelMonitor, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *ChannelMonitorUpdateOne) SaveX(ctx context.Context) *ChannelMonitor { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *ChannelMonitorUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *ChannelMonitorUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *ChannelMonitorUpdateOne) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := channelmonitor.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_u *ChannelMonitorUpdateOne) check() error { + if v, ok := _u.mutation.Name(); ok { + if err := channelmonitor.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitor.name": %w`, err)} + } + } + if v, ok := _u.mutation.Provider(); ok { + if err := channelmonitor.ProviderValidator(v); err != nil { + return &ValidationError{Name: "provider", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitor.provider": %w`, err)} + } + } + if v, ok := _u.mutation.Endpoint(); ok { + if err := channelmonitor.EndpointValidator(v); err != nil { + return &ValidationError{Name: "endpoint", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitor.endpoint": %w`, err)} + } + } + if v, ok := _u.mutation.APIKeyEncrypted(); ok { + if err := channelmonitor.APIKeyEncryptedValidator(v); err != nil { + return &ValidationError{Name: "api_key_encrypted", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitor.api_key_encrypted": %w`, err)} + } + } + if v, ok := _u.mutation.PrimaryModel(); ok { + if err := channelmonitor.PrimaryModelValidator(v); err != nil { + return &ValidationError{Name: "primary_model", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitor.primary_model": %w`, err)} + } + } + if v, ok := _u.mutation.GroupName(); ok { + if err := channelmonitor.GroupNameValidator(v); err != nil { + return &ValidationError{Name: "group_name", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitor.group_name": %w`, err)} + } + } + if v, ok := _u.mutation.IntervalSeconds(); ok { + if err := channelmonitor.IntervalSecondsValidator(v); err != nil { + return &ValidationError{Name: "interval_seconds", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitor.interval_seconds": %w`, err)} + } + } + return nil +} + +func (_u *ChannelMonitorUpdateOne) sqlSave(ctx context.Context) (_node *ChannelMonitor, err error) { + if err := _u.check(); err != nil { + return 
_node, err + } + _spec := sqlgraph.NewUpdateSpec(channelmonitor.Table, channelmonitor.Columns, sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "ChannelMonitor.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, channelmonitor.FieldID) + for _, f := range fields { + if !channelmonitor.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != channelmonitor.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(channelmonitor.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.Name(); ok { + _spec.SetField(channelmonitor.FieldName, field.TypeString, value) + } + if value, ok := _u.mutation.Provider(); ok { + _spec.SetField(channelmonitor.FieldProvider, field.TypeEnum, value) + } + if value, ok := _u.mutation.Endpoint(); ok { + _spec.SetField(channelmonitor.FieldEndpoint, field.TypeString, value) + } + if value, ok := _u.mutation.APIKeyEncrypted(); ok { + _spec.SetField(channelmonitor.FieldAPIKeyEncrypted, field.TypeString, value) + } + if value, ok := _u.mutation.PrimaryModel(); ok { + _spec.SetField(channelmonitor.FieldPrimaryModel, field.TypeString, value) + } + if value, ok := _u.mutation.ExtraModels(); ok { + _spec.SetField(channelmonitor.FieldExtraModels, field.TypeJSON, value) + } + if value, ok := _u.mutation.AppendedExtraModels(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, channelmonitor.FieldExtraModels, value) + }) + } + if value, ok := 
_u.mutation.GroupName(); ok { + _spec.SetField(channelmonitor.FieldGroupName, field.TypeString, value) + } + if _u.mutation.GroupNameCleared() { + _spec.ClearField(channelmonitor.FieldGroupName, field.TypeString) + } + if value, ok := _u.mutation.Enabled(); ok { + _spec.SetField(channelmonitor.FieldEnabled, field.TypeBool, value) + } + if value, ok := _u.mutation.IntervalSeconds(); ok { + _spec.SetField(channelmonitor.FieldIntervalSeconds, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedIntervalSeconds(); ok { + _spec.AddField(channelmonitor.FieldIntervalSeconds, field.TypeInt, value) + } + if value, ok := _u.mutation.LastCheckedAt(); ok { + _spec.SetField(channelmonitor.FieldLastCheckedAt, field.TypeTime, value) + } + if _u.mutation.LastCheckedAtCleared() { + _spec.ClearField(channelmonitor.FieldLastCheckedAt, field.TypeTime) + } + if value, ok := _u.mutation.CreatedBy(); ok { + _spec.SetField(channelmonitor.FieldCreatedBy, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedCreatedBy(); ok { + _spec.AddField(channelmonitor.FieldCreatedBy, field.TypeInt64, value) + } + if _u.mutation.HistoryCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: channelmonitor.HistoryTable, + Columns: []string{channelmonitor.HistoryColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(channelmonitorhistory.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedHistoryIDs(); len(nodes) > 0 && !_u.mutation.HistoryCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: channelmonitor.HistoryTable, + Columns: []string{channelmonitor.HistoryColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(channelmonitorhistory.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = 
append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.HistoryIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: channelmonitor.HistoryTable, + Columns: []string{channelmonitor.HistoryColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(channelmonitorhistory.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &ChannelMonitor{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{channelmonitor.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/ent/channelmonitorhistory.go b/backend/ent/channelmonitorhistory.go new file mode 100644 index 00000000..70dde542 --- /dev/null +++ b/backend/ent/channelmonitorhistory.go @@ -0,0 +1,207 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/channelmonitor" + "github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory" +) + +// ChannelMonitorHistory is the model entity for the ChannelMonitorHistory schema. +type ChannelMonitorHistory struct { + config `json:"-"` + // ID of the ent. + ID int64 `json:"id,omitempty"` + // MonitorID holds the value of the "monitor_id" field. + MonitorID int64 `json:"monitor_id,omitempty"` + // Model holds the value of the "model" field. + Model string `json:"model,omitempty"` + // Status holds the value of the "status" field. 
+ Status channelmonitorhistory.Status `json:"status,omitempty"` + // LatencyMs holds the value of the "latency_ms" field. + LatencyMs *int `json:"latency_ms,omitempty"` + // PingLatencyMs holds the value of the "ping_latency_ms" field. + PingLatencyMs *int `json:"ping_latency_ms,omitempty"` + // Message holds the value of the "message" field. + Message string `json:"message,omitempty"` + // CheckedAt holds the value of the "checked_at" field. + CheckedAt time.Time `json:"checked_at,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the ChannelMonitorHistoryQuery when eager-loading is set. + Edges ChannelMonitorHistoryEdges `json:"edges"` + selectValues sql.SelectValues +} + +// ChannelMonitorHistoryEdges holds the relations/edges for other nodes in the graph. +type ChannelMonitorHistoryEdges struct { + // Monitor holds the value of the monitor edge. + Monitor *ChannelMonitor `json:"monitor,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// MonitorOrErr returns the Monitor value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e ChannelMonitorHistoryEdges) MonitorOrErr() (*ChannelMonitor, error) { + if e.Monitor != nil { + return e.Monitor, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: channelmonitor.Label} + } + return nil, &NotLoadedError{edge: "monitor"} +} + +// scanValues returns the types for scanning values from sql.Rows. 
+func (*ChannelMonitorHistory) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case channelmonitorhistory.FieldID, channelmonitorhistory.FieldMonitorID, channelmonitorhistory.FieldLatencyMs, channelmonitorhistory.FieldPingLatencyMs: + values[i] = new(sql.NullInt64) + case channelmonitorhistory.FieldModel, channelmonitorhistory.FieldStatus, channelmonitorhistory.FieldMessage: + values[i] = new(sql.NullString) + case channelmonitorhistory.FieldCheckedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the ChannelMonitorHistory fields. +func (_m *ChannelMonitorHistory) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case channelmonitorhistory.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int64(value.Int64) + case channelmonitorhistory.FieldMonitorID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field monitor_id", values[i]) + } else if value.Valid { + _m.MonitorID = value.Int64 + } + case channelmonitorhistory.FieldModel: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field model", values[i]) + } else if value.Valid { + _m.Model = value.String + } + case channelmonitorhistory.FieldStatus: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field status", values[i]) + } else if value.Valid { + _m.Status = channelmonitorhistory.Status(value.String) + } + case channelmonitorhistory.FieldLatencyMs: + if value, ok := 
values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field latency_ms", values[i]) + } else if value.Valid { + _m.LatencyMs = new(int) + *_m.LatencyMs = int(value.Int64) + } + case channelmonitorhistory.FieldPingLatencyMs: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field ping_latency_ms", values[i]) + } else if value.Valid { + _m.PingLatencyMs = new(int) + *_m.PingLatencyMs = int(value.Int64) + } + case channelmonitorhistory.FieldMessage: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field message", values[i]) + } else if value.Valid { + _m.Message = value.String + } + case channelmonitorhistory.FieldCheckedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field checked_at", values[i]) + } else if value.Valid { + _m.CheckedAt = value.Time + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the ChannelMonitorHistory. +// This includes values selected through modifiers, order, etc. +func (_m *ChannelMonitorHistory) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// QueryMonitor queries the "monitor" edge of the ChannelMonitorHistory entity. +func (_m *ChannelMonitorHistory) QueryMonitor() *ChannelMonitorQuery { + return NewChannelMonitorHistoryClient(_m.config).QueryMonitor(_m) +} + +// Update returns a builder for updating this ChannelMonitorHistory. +// Note that you need to call ChannelMonitorHistory.Unwrap() before calling this method if this ChannelMonitorHistory +// was returned from a transaction, and the transaction was committed or rolled back. 
+func (_m *ChannelMonitorHistory) Update() *ChannelMonitorHistoryUpdateOne { + return NewChannelMonitorHistoryClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the ChannelMonitorHistory entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *ChannelMonitorHistory) Unwrap() *ChannelMonitorHistory { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: ChannelMonitorHistory is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. +func (_m *ChannelMonitorHistory) String() string { + var builder strings.Builder + builder.WriteString("ChannelMonitorHistory(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("monitor_id=") + builder.WriteString(fmt.Sprintf("%v", _m.MonitorID)) + builder.WriteString(", ") + builder.WriteString("model=") + builder.WriteString(_m.Model) + builder.WriteString(", ") + builder.WriteString("status=") + builder.WriteString(fmt.Sprintf("%v", _m.Status)) + builder.WriteString(", ") + if v := _m.LatencyMs; v != nil { + builder.WriteString("latency_ms=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + if v := _m.PingLatencyMs; v != nil { + builder.WriteString("ping_latency_ms=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + builder.WriteString("message=") + builder.WriteString(_m.Message) + builder.WriteString(", ") + builder.WriteString("checked_at=") + builder.WriteString(_m.CheckedAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// ChannelMonitorHistories is a parsable slice of ChannelMonitorHistory. 
+type ChannelMonitorHistories []*ChannelMonitorHistory diff --git a/backend/ent/channelmonitorhistory/channelmonitorhistory.go b/backend/ent/channelmonitorhistory/channelmonitorhistory.go new file mode 100644 index 00000000..6a9dc006 --- /dev/null +++ b/backend/ent/channelmonitorhistory/channelmonitorhistory.go @@ -0,0 +1,158 @@ +// Code generated by ent, DO NOT EDIT. + +package channelmonitorhistory + +import ( + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the channelmonitorhistory type in the database. + Label = "channel_monitor_history" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldMonitorID holds the string denoting the monitor_id field in the database. + FieldMonitorID = "monitor_id" + // FieldModel holds the string denoting the model field in the database. + FieldModel = "model" + // FieldStatus holds the string denoting the status field in the database. + FieldStatus = "status" + // FieldLatencyMs holds the string denoting the latency_ms field in the database. + FieldLatencyMs = "latency_ms" + // FieldPingLatencyMs holds the string denoting the ping_latency_ms field in the database. + FieldPingLatencyMs = "ping_latency_ms" + // FieldMessage holds the string denoting the message field in the database. + FieldMessage = "message" + // FieldCheckedAt holds the string denoting the checked_at field in the database. + FieldCheckedAt = "checked_at" + // EdgeMonitor holds the string denoting the monitor edge name in mutations. + EdgeMonitor = "monitor" + // Table holds the table name of the channelmonitorhistory in the database. + Table = "channel_monitor_histories" + // MonitorTable is the table that holds the monitor relation/edge. + MonitorTable = "channel_monitor_histories" + // MonitorInverseTable is the table name for the ChannelMonitor entity. 
+ // It exists in this package in order to avoid circular dependency with the "channelmonitor" package. + MonitorInverseTable = "channel_monitors" + // MonitorColumn is the table column denoting the monitor relation/edge. + MonitorColumn = "monitor_id" +) + +// Columns holds all SQL columns for channelmonitorhistory fields. +var Columns = []string{ + FieldID, + FieldMonitorID, + FieldModel, + FieldStatus, + FieldLatencyMs, + FieldPingLatencyMs, + FieldMessage, + FieldCheckedAt, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // ModelValidator is a validator for the "model" field. It is called by the builders before save. + ModelValidator func(string) error + // DefaultMessage holds the default value on creation for the "message" field. + DefaultMessage string + // MessageValidator is a validator for the "message" field. It is called by the builders before save. + MessageValidator func(string) error + // DefaultCheckedAt holds the default value on creation for the "checked_at" field. + DefaultCheckedAt func() time.Time +) + +// Status defines the type for the "status" enum field. +type Status string + +// Status values. +const ( + StatusOperational Status = "operational" + StatusDegraded Status = "degraded" + StatusFailed Status = "failed" + StatusError Status = "error" +) + +func (s Status) String() string { + return string(s) +} + +// StatusValidator is a validator for the "status" field enum values. It is called by the builders before save. +func StatusValidator(s Status) error { + switch s { + case StatusOperational, StatusDegraded, StatusFailed, StatusError: + return nil + default: + return fmt.Errorf("channelmonitorhistory: invalid enum value for status field: %q", s) + } +} + +// OrderOption defines the ordering options for the ChannelMonitorHistory queries. 
+type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByMonitorID orders the results by the monitor_id field. +func ByMonitorID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldMonitorID, opts...).ToFunc() +} + +// ByModel orders the results by the model field. +func ByModel(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldModel, opts...).ToFunc() +} + +// ByStatus orders the results by the status field. +func ByStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStatus, opts...).ToFunc() +} + +// ByLatencyMs orders the results by the latency_ms field. +func ByLatencyMs(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLatencyMs, opts...).ToFunc() +} + +// ByPingLatencyMs orders the results by the ping_latency_ms field. +func ByPingLatencyMs(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPingLatencyMs, opts...).ToFunc() +} + +// ByMessage orders the results by the message field. +func ByMessage(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldMessage, opts...).ToFunc() +} + +// ByCheckedAt orders the results by the checked_at field. +func ByCheckedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCheckedAt, opts...).ToFunc() +} + +// ByMonitorField orders the results by monitor field. 
+func ByMonitorField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newMonitorStep(), sql.OrderByField(field, opts...)) + } +} +func newMonitorStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(MonitorInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, MonitorTable, MonitorColumn), + ) +} diff --git a/backend/ent/channelmonitorhistory/where.go b/backend/ent/channelmonitorhistory/where.go new file mode 100644 index 00000000..afa73f35 --- /dev/null +++ b/backend/ent/channelmonitorhistory/where.go @@ -0,0 +1,444 @@ +// Code generated by ent, DO NOT EDIT. + +package channelmonitorhistory + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int64) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int64) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int64) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int64) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. 
+func IDGT(id int64) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int64) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int64) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int64) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldLTE(FieldID, id)) +} + +// MonitorID applies equality check predicate on the "monitor_id" field. It's identical to MonitorIDEQ. +func MonitorID(v int64) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldMonitorID, v)) +} + +// Model applies equality check predicate on the "model" field. It's identical to ModelEQ. +func Model(v string) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldModel, v)) +} + +// LatencyMs applies equality check predicate on the "latency_ms" field. It's identical to LatencyMsEQ. +func LatencyMs(v int) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldLatencyMs, v)) +} + +// PingLatencyMs applies equality check predicate on the "ping_latency_ms" field. It's identical to PingLatencyMsEQ. +func PingLatencyMs(v int) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldPingLatencyMs, v)) +} + +// Message applies equality check predicate on the "message" field. It's identical to MessageEQ. +func Message(v string) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldMessage, v)) +} + +// CheckedAt applies equality check predicate on the "checked_at" field. It's identical to CheckedAtEQ. 
+func CheckedAt(v time.Time) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldCheckedAt, v)) +} + +// MonitorIDEQ applies the EQ predicate on the "monitor_id" field. +func MonitorIDEQ(v int64) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldMonitorID, v)) +} + +// MonitorIDNEQ applies the NEQ predicate on the "monitor_id" field. +func MonitorIDNEQ(v int64) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldNEQ(FieldMonitorID, v)) +} + +// MonitorIDIn applies the In predicate on the "monitor_id" field. +func MonitorIDIn(vs ...int64) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldIn(FieldMonitorID, vs...)) +} + +// MonitorIDNotIn applies the NotIn predicate on the "monitor_id" field. +func MonitorIDNotIn(vs ...int64) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldNotIn(FieldMonitorID, vs...)) +} + +// ModelEQ applies the EQ predicate on the "model" field. +func ModelEQ(v string) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldModel, v)) +} + +// ModelNEQ applies the NEQ predicate on the "model" field. +func ModelNEQ(v string) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldNEQ(FieldModel, v)) +} + +// ModelIn applies the In predicate on the "model" field. +func ModelIn(vs ...string) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldIn(FieldModel, vs...)) +} + +// ModelNotIn applies the NotIn predicate on the "model" field. +func ModelNotIn(vs ...string) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldNotIn(FieldModel, vs...)) +} + +// ModelGT applies the GT predicate on the "model" field. 
+func ModelGT(v string) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldGT(FieldModel, v)) +} + +// ModelGTE applies the GTE predicate on the "model" field. +func ModelGTE(v string) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldGTE(FieldModel, v)) +} + +// ModelLT applies the LT predicate on the "model" field. +func ModelLT(v string) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldLT(FieldModel, v)) +} + +// ModelLTE applies the LTE predicate on the "model" field. +func ModelLTE(v string) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldLTE(FieldModel, v)) +} + +// ModelContains applies the Contains predicate on the "model" field. +func ModelContains(v string) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldContains(FieldModel, v)) +} + +// ModelHasPrefix applies the HasPrefix predicate on the "model" field. +func ModelHasPrefix(v string) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldHasPrefix(FieldModel, v)) +} + +// ModelHasSuffix applies the HasSuffix predicate on the "model" field. +func ModelHasSuffix(v string) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldHasSuffix(FieldModel, v)) +} + +// ModelEqualFold applies the EqualFold predicate on the "model" field. +func ModelEqualFold(v string) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldEqualFold(FieldModel, v)) +} + +// ModelContainsFold applies the ContainsFold predicate on the "model" field. +func ModelContainsFold(v string) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldContainsFold(FieldModel, v)) +} + +// StatusEQ applies the EQ predicate on the "status" field. 
+func StatusEQ(v Status) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldStatus, v)) +} + +// StatusNEQ applies the NEQ predicate on the "status" field. +func StatusNEQ(v Status) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldNEQ(FieldStatus, v)) +} + +// StatusIn applies the In predicate on the "status" field. +func StatusIn(vs ...Status) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldIn(FieldStatus, vs...)) +} + +// StatusNotIn applies the NotIn predicate on the "status" field. +func StatusNotIn(vs ...Status) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldNotIn(FieldStatus, vs...)) +} + +// LatencyMsEQ applies the EQ predicate on the "latency_ms" field. +func LatencyMsEQ(v int) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldLatencyMs, v)) +} + +// LatencyMsNEQ applies the NEQ predicate on the "latency_ms" field. +func LatencyMsNEQ(v int) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldNEQ(FieldLatencyMs, v)) +} + +// LatencyMsIn applies the In predicate on the "latency_ms" field. +func LatencyMsIn(vs ...int) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldIn(FieldLatencyMs, vs...)) +} + +// LatencyMsNotIn applies the NotIn predicate on the "latency_ms" field. +func LatencyMsNotIn(vs ...int) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldNotIn(FieldLatencyMs, vs...)) +} + +// LatencyMsGT applies the GT predicate on the "latency_ms" field. +func LatencyMsGT(v int) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldGT(FieldLatencyMs, v)) +} + +// LatencyMsGTE applies the GTE predicate on the "latency_ms" field. 
+func LatencyMsGTE(v int) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldGTE(FieldLatencyMs, v)) +} + +// LatencyMsLT applies the LT predicate on the "latency_ms" field. +func LatencyMsLT(v int) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldLT(FieldLatencyMs, v)) +} + +// LatencyMsLTE applies the LTE predicate on the "latency_ms" field. +func LatencyMsLTE(v int) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldLTE(FieldLatencyMs, v)) +} + +// LatencyMsIsNil applies the IsNil predicate on the "latency_ms" field. +func LatencyMsIsNil() predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldIsNull(FieldLatencyMs)) +} + +// LatencyMsNotNil applies the NotNil predicate on the "latency_ms" field. +func LatencyMsNotNil() predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldNotNull(FieldLatencyMs)) +} + +// PingLatencyMsEQ applies the EQ predicate on the "ping_latency_ms" field. +func PingLatencyMsEQ(v int) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldPingLatencyMs, v)) +} + +// PingLatencyMsNEQ applies the NEQ predicate on the "ping_latency_ms" field. +func PingLatencyMsNEQ(v int) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldNEQ(FieldPingLatencyMs, v)) +} + +// PingLatencyMsIn applies the In predicate on the "ping_latency_ms" field. +func PingLatencyMsIn(vs ...int) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldIn(FieldPingLatencyMs, vs...)) +} + +// PingLatencyMsNotIn applies the NotIn predicate on the "ping_latency_ms" field. +func PingLatencyMsNotIn(vs ...int) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldNotIn(FieldPingLatencyMs, vs...)) +} + +// PingLatencyMsGT applies the GT predicate on the "ping_latency_ms" field. 
+func PingLatencyMsGT(v int) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldGT(FieldPingLatencyMs, v)) +} + +// PingLatencyMsGTE applies the GTE predicate on the "ping_latency_ms" field. +func PingLatencyMsGTE(v int) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldGTE(FieldPingLatencyMs, v)) +} + +// PingLatencyMsLT applies the LT predicate on the "ping_latency_ms" field. +func PingLatencyMsLT(v int) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldLT(FieldPingLatencyMs, v)) +} + +// PingLatencyMsLTE applies the LTE predicate on the "ping_latency_ms" field. +func PingLatencyMsLTE(v int) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldLTE(FieldPingLatencyMs, v)) +} + +// PingLatencyMsIsNil applies the IsNil predicate on the "ping_latency_ms" field. +func PingLatencyMsIsNil() predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldIsNull(FieldPingLatencyMs)) +} + +// PingLatencyMsNotNil applies the NotNil predicate on the "ping_latency_ms" field. +func PingLatencyMsNotNil() predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldNotNull(FieldPingLatencyMs)) +} + +// MessageEQ applies the EQ predicate on the "message" field. +func MessageEQ(v string) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldMessage, v)) +} + +// MessageNEQ applies the NEQ predicate on the "message" field. +func MessageNEQ(v string) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldNEQ(FieldMessage, v)) +} + +// MessageIn applies the In predicate on the "message" field. +func MessageIn(vs ...string) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldIn(FieldMessage, vs...)) +} + +// MessageNotIn applies the NotIn predicate on the "message" field. 
+func MessageNotIn(vs ...string) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldNotIn(FieldMessage, vs...)) +} + +// MessageGT applies the GT predicate on the "message" field. +func MessageGT(v string) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldGT(FieldMessage, v)) +} + +// MessageGTE applies the GTE predicate on the "message" field. +func MessageGTE(v string) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldGTE(FieldMessage, v)) +} + +// MessageLT applies the LT predicate on the "message" field. +func MessageLT(v string) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldLT(FieldMessage, v)) +} + +// MessageLTE applies the LTE predicate on the "message" field. +func MessageLTE(v string) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldLTE(FieldMessage, v)) +} + +// MessageContains applies the Contains predicate on the "message" field. +func MessageContains(v string) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldContains(FieldMessage, v)) +} + +// MessageHasPrefix applies the HasPrefix predicate on the "message" field. +func MessageHasPrefix(v string) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldHasPrefix(FieldMessage, v)) +} + +// MessageHasSuffix applies the HasSuffix predicate on the "message" field. +func MessageHasSuffix(v string) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldHasSuffix(FieldMessage, v)) +} + +// MessageIsNil applies the IsNil predicate on the "message" field. +func MessageIsNil() predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldIsNull(FieldMessage)) +} + +// MessageNotNil applies the NotNil predicate on the "message" field. 
+func MessageNotNil() predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldNotNull(FieldMessage)) +} + +// MessageEqualFold applies the EqualFold predicate on the "message" field. +func MessageEqualFold(v string) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldEqualFold(FieldMessage, v)) +} + +// MessageContainsFold applies the ContainsFold predicate on the "message" field. +func MessageContainsFold(v string) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldContainsFold(FieldMessage, v)) +} + +// CheckedAtEQ applies the EQ predicate on the "checked_at" field. +func CheckedAtEQ(v time.Time) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldCheckedAt, v)) +} + +// CheckedAtNEQ applies the NEQ predicate on the "checked_at" field. +func CheckedAtNEQ(v time.Time) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldNEQ(FieldCheckedAt, v)) +} + +// CheckedAtIn applies the In predicate on the "checked_at" field. +func CheckedAtIn(vs ...time.Time) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldIn(FieldCheckedAt, vs...)) +} + +// CheckedAtNotIn applies the NotIn predicate on the "checked_at" field. +func CheckedAtNotIn(vs ...time.Time) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldNotIn(FieldCheckedAt, vs...)) +} + +// CheckedAtGT applies the GT predicate on the "checked_at" field. +func CheckedAtGT(v time.Time) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldGT(FieldCheckedAt, v)) +} + +// CheckedAtGTE applies the GTE predicate on the "checked_at" field. +func CheckedAtGTE(v time.Time) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldGTE(FieldCheckedAt, v)) +} + +// CheckedAtLT applies the LT predicate on the "checked_at" field. 
+func CheckedAtLT(v time.Time) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldLT(FieldCheckedAt, v)) +} + +// CheckedAtLTE applies the LTE predicate on the "checked_at" field. +func CheckedAtLTE(v time.Time) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldLTE(FieldCheckedAt, v)) +} + +// HasMonitor applies the HasEdge predicate on the "monitor" edge. +func HasMonitor() predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, MonitorTable, MonitorColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasMonitorWith applies the HasEdge predicate on the "monitor" edge with a given conditions (other predicates). +func HasMonitorWith(preds ...predicate.ChannelMonitor) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(func(s *sql.Selector) { + step := newMonitorStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.ChannelMonitorHistory) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.ChannelMonitorHistory) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. 
+func Not(p predicate.ChannelMonitorHistory) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.NotPredicates(p)) +} diff --git a/backend/ent/channelmonitorhistory_create.go b/backend/ent/channelmonitorhistory_create.go new file mode 100644 index 00000000..71034865 --- /dev/null +++ b/backend/ent/channelmonitorhistory_create.go @@ -0,0 +1,947 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/channelmonitor" + "github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory" +) + +// ChannelMonitorHistoryCreate is the builder for creating a ChannelMonitorHistory entity. +type ChannelMonitorHistoryCreate struct { + config + mutation *ChannelMonitorHistoryMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetMonitorID sets the "monitor_id" field. +func (_c *ChannelMonitorHistoryCreate) SetMonitorID(v int64) *ChannelMonitorHistoryCreate { + _c.mutation.SetMonitorID(v) + return _c +} + +// SetModel sets the "model" field. +func (_c *ChannelMonitorHistoryCreate) SetModel(v string) *ChannelMonitorHistoryCreate { + _c.mutation.SetModel(v) + return _c +} + +// SetStatus sets the "status" field. +func (_c *ChannelMonitorHistoryCreate) SetStatus(v channelmonitorhistory.Status) *ChannelMonitorHistoryCreate { + _c.mutation.SetStatus(v) + return _c +} + +// SetLatencyMs sets the "latency_ms" field. +func (_c *ChannelMonitorHistoryCreate) SetLatencyMs(v int) *ChannelMonitorHistoryCreate { + _c.mutation.SetLatencyMs(v) + return _c +} + +// SetNillableLatencyMs sets the "latency_ms" field if the given value is not nil. +func (_c *ChannelMonitorHistoryCreate) SetNillableLatencyMs(v *int) *ChannelMonitorHistoryCreate { + if v != nil { + _c.SetLatencyMs(*v) + } + return _c +} + +// SetPingLatencyMs sets the "ping_latency_ms" field. 
+func (_c *ChannelMonitorHistoryCreate) SetPingLatencyMs(v int) *ChannelMonitorHistoryCreate { + _c.mutation.SetPingLatencyMs(v) + return _c +} + +// SetNillablePingLatencyMs sets the "ping_latency_ms" field if the given value is not nil. +func (_c *ChannelMonitorHistoryCreate) SetNillablePingLatencyMs(v *int) *ChannelMonitorHistoryCreate { + if v != nil { + _c.SetPingLatencyMs(*v) + } + return _c +} + +// SetMessage sets the "message" field. +func (_c *ChannelMonitorHistoryCreate) SetMessage(v string) *ChannelMonitorHistoryCreate { + _c.mutation.SetMessage(v) + return _c +} + +// SetNillableMessage sets the "message" field if the given value is not nil. +func (_c *ChannelMonitorHistoryCreate) SetNillableMessage(v *string) *ChannelMonitorHistoryCreate { + if v != nil { + _c.SetMessage(*v) + } + return _c +} + +// SetCheckedAt sets the "checked_at" field. +func (_c *ChannelMonitorHistoryCreate) SetCheckedAt(v time.Time) *ChannelMonitorHistoryCreate { + _c.mutation.SetCheckedAt(v) + return _c +} + +// SetNillableCheckedAt sets the "checked_at" field if the given value is not nil. +func (_c *ChannelMonitorHistoryCreate) SetNillableCheckedAt(v *time.Time) *ChannelMonitorHistoryCreate { + if v != nil { + _c.SetCheckedAt(*v) + } + return _c +} + +// SetMonitor sets the "monitor" edge to the ChannelMonitor entity. +func (_c *ChannelMonitorHistoryCreate) SetMonitor(v *ChannelMonitor) *ChannelMonitorHistoryCreate { + return _c.SetMonitorID(v.ID) +} + +// Mutation returns the ChannelMonitorHistoryMutation object of the builder. +func (_c *ChannelMonitorHistoryCreate) Mutation() *ChannelMonitorHistoryMutation { + return _c.mutation +} + +// Save creates the ChannelMonitorHistory in the database. +func (_c *ChannelMonitorHistoryCreate) Save(ctx context.Context) (*ChannelMonitorHistory, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. 
+func (_c *ChannelMonitorHistoryCreate) SaveX(ctx context.Context) *ChannelMonitorHistory { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *ChannelMonitorHistoryCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *ChannelMonitorHistoryCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *ChannelMonitorHistoryCreate) defaults() { + if _, ok := _c.mutation.Message(); !ok { + v := channelmonitorhistory.DefaultMessage + _c.mutation.SetMessage(v) + } + if _, ok := _c.mutation.CheckedAt(); !ok { + v := channelmonitorhistory.DefaultCheckedAt() + _c.mutation.SetCheckedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_c *ChannelMonitorHistoryCreate) check() error { + if _, ok := _c.mutation.MonitorID(); !ok { + return &ValidationError{Name: "monitor_id", err: errors.New(`ent: missing required field "ChannelMonitorHistory.monitor_id"`)} + } + if _, ok := _c.mutation.Model(); !ok { + return &ValidationError{Name: "model", err: errors.New(`ent: missing required field "ChannelMonitorHistory.model"`)} + } + if v, ok := _c.mutation.Model(); ok { + if err := channelmonitorhistory.ModelValidator(v); err != nil { + return &ValidationError{Name: "model", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorHistory.model": %w`, err)} + } + } + if _, ok := _c.mutation.Status(); !ok { + return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "ChannelMonitorHistory.status"`)} + } + if v, ok := _c.mutation.Status(); ok { + if err := channelmonitorhistory.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorHistory.status": %w`, err)} + } + } 
+ if v, ok := _c.mutation.Message(); ok { + if err := channelmonitorhistory.MessageValidator(v); err != nil { + return &ValidationError{Name: "message", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorHistory.message": %w`, err)} + } + } + if _, ok := _c.mutation.CheckedAt(); !ok { + return &ValidationError{Name: "checked_at", err: errors.New(`ent: missing required field "ChannelMonitorHistory.checked_at"`)} + } + if len(_c.mutation.MonitorIDs()) == 0 { + return &ValidationError{Name: "monitor", err: errors.New(`ent: missing required edge "ChannelMonitorHistory.monitor"`)} + } + return nil +} + +func (_c *ChannelMonitorHistoryCreate) sqlSave(ctx context.Context) (*ChannelMonitorHistory, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int64(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *ChannelMonitorHistoryCreate) createSpec() (*ChannelMonitorHistory, *sqlgraph.CreateSpec) { + var ( + _node = &ChannelMonitorHistory{config: _c.config} + _spec = sqlgraph.NewCreateSpec(channelmonitorhistory.Table, sqlgraph.NewFieldSpec(channelmonitorhistory.FieldID, field.TypeInt64)) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.Model(); ok { + _spec.SetField(channelmonitorhistory.FieldModel, field.TypeString, value) + _node.Model = value + } + if value, ok := _c.mutation.Status(); ok { + _spec.SetField(channelmonitorhistory.FieldStatus, field.TypeEnum, value) + _node.Status = value + } + if value, ok := _c.mutation.LatencyMs(); ok { + _spec.SetField(channelmonitorhistory.FieldLatencyMs, field.TypeInt, value) + _node.LatencyMs = &value + } + if value, ok := _c.mutation.PingLatencyMs(); ok { + 
_spec.SetField(channelmonitorhistory.FieldPingLatencyMs, field.TypeInt, value)
+		_node.PingLatencyMs = &value
+	}
+	if value, ok := _c.mutation.Message(); ok {
+		_spec.SetField(channelmonitorhistory.FieldMessage, field.TypeString, value)
+		_node.Message = value
+	}
+	if value, ok := _c.mutation.CheckedAt(); ok {
+		_spec.SetField(channelmonitorhistory.FieldCheckedAt, field.TypeTime, value)
+		_node.CheckedAt = value
+	}
+	if nodes := _c.mutation.MonitorIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.M2O,
+			Inverse: true,
+			Table:   channelmonitorhistory.MonitorTable,
+			Columns: []string{channelmonitorhistory.MonitorColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		// M2O edge: a history row belongs to exactly one monitor, so the
+		// single edge target also backs the denormalized MonitorID column.
+		_node.MonitorID = nodes[0]
+		_spec.Edges = append(_spec.Edges, edge)
+	}
+	return _node, _spec
+}
+
+// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
+// of the `INSERT` statement. For example:
+//
+//	client.ChannelMonitorHistory.Create().
+//		SetMonitorID(v).
+//		OnConflict(
+//			// Update the row with the new values
+//			// that were proposed for insertion.
+//			sql.ResolveWithNewValues(),
+//		).
+//		// Override some of the fields with custom
+//		// update values.
+//		Update(func(u *ent.ChannelMonitorHistoryUpsert) {
+//			u.SetMonitorID(v + v)
+//		}).
+//		Exec(ctx)
+func (_c *ChannelMonitorHistoryCreate) OnConflict(opts ...sql.ConflictOption) *ChannelMonitorHistoryUpsertOne {
+	_c.conflict = opts
+	return &ChannelMonitorHistoryUpsertOne{
+		create: _c,
+	}
+}
+
+// OnConflictColumns calls `OnConflict` and configures the columns
+// as conflict target. Using this option is equivalent to using:
+//
+//	client.ChannelMonitorHistory.Create().
+//		OnConflict(sql.ConflictColumns(columns...)).
+// Exec(ctx) +func (_c *ChannelMonitorHistoryCreate) OnConflictColumns(columns ...string) *ChannelMonitorHistoryUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &ChannelMonitorHistoryUpsertOne{ + create: _c, + } +} + +type ( + // ChannelMonitorHistoryUpsertOne is the builder for "upsert"-ing + // one ChannelMonitorHistory node. + ChannelMonitorHistoryUpsertOne struct { + create *ChannelMonitorHistoryCreate + } + + // ChannelMonitorHistoryUpsert is the "OnConflict" setter. + ChannelMonitorHistoryUpsert struct { + *sql.UpdateSet + } +) + +// SetMonitorID sets the "monitor_id" field. +func (u *ChannelMonitorHistoryUpsert) SetMonitorID(v int64) *ChannelMonitorHistoryUpsert { + u.Set(channelmonitorhistory.FieldMonitorID, v) + return u +} + +// UpdateMonitorID sets the "monitor_id" field to the value that was provided on create. +func (u *ChannelMonitorHistoryUpsert) UpdateMonitorID() *ChannelMonitorHistoryUpsert { + u.SetExcluded(channelmonitorhistory.FieldMonitorID) + return u +} + +// SetModel sets the "model" field. +func (u *ChannelMonitorHistoryUpsert) SetModel(v string) *ChannelMonitorHistoryUpsert { + u.Set(channelmonitorhistory.FieldModel, v) + return u +} + +// UpdateModel sets the "model" field to the value that was provided on create. +func (u *ChannelMonitorHistoryUpsert) UpdateModel() *ChannelMonitorHistoryUpsert { + u.SetExcluded(channelmonitorhistory.FieldModel) + return u +} + +// SetStatus sets the "status" field. +func (u *ChannelMonitorHistoryUpsert) SetStatus(v channelmonitorhistory.Status) *ChannelMonitorHistoryUpsert { + u.Set(channelmonitorhistory.FieldStatus, v) + return u +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *ChannelMonitorHistoryUpsert) UpdateStatus() *ChannelMonitorHistoryUpsert { + u.SetExcluded(channelmonitorhistory.FieldStatus) + return u +} + +// SetLatencyMs sets the "latency_ms" field. 
+func (u *ChannelMonitorHistoryUpsert) SetLatencyMs(v int) *ChannelMonitorHistoryUpsert { + u.Set(channelmonitorhistory.FieldLatencyMs, v) + return u +} + +// UpdateLatencyMs sets the "latency_ms" field to the value that was provided on create. +func (u *ChannelMonitorHistoryUpsert) UpdateLatencyMs() *ChannelMonitorHistoryUpsert { + u.SetExcluded(channelmonitorhistory.FieldLatencyMs) + return u +} + +// AddLatencyMs adds v to the "latency_ms" field. +func (u *ChannelMonitorHistoryUpsert) AddLatencyMs(v int) *ChannelMonitorHistoryUpsert { + u.Add(channelmonitorhistory.FieldLatencyMs, v) + return u +} + +// ClearLatencyMs clears the value of the "latency_ms" field. +func (u *ChannelMonitorHistoryUpsert) ClearLatencyMs() *ChannelMonitorHistoryUpsert { + u.SetNull(channelmonitorhistory.FieldLatencyMs) + return u +} + +// SetPingLatencyMs sets the "ping_latency_ms" field. +func (u *ChannelMonitorHistoryUpsert) SetPingLatencyMs(v int) *ChannelMonitorHistoryUpsert { + u.Set(channelmonitorhistory.FieldPingLatencyMs, v) + return u +} + +// UpdatePingLatencyMs sets the "ping_latency_ms" field to the value that was provided on create. +func (u *ChannelMonitorHistoryUpsert) UpdatePingLatencyMs() *ChannelMonitorHistoryUpsert { + u.SetExcluded(channelmonitorhistory.FieldPingLatencyMs) + return u +} + +// AddPingLatencyMs adds v to the "ping_latency_ms" field. +func (u *ChannelMonitorHistoryUpsert) AddPingLatencyMs(v int) *ChannelMonitorHistoryUpsert { + u.Add(channelmonitorhistory.FieldPingLatencyMs, v) + return u +} + +// ClearPingLatencyMs clears the value of the "ping_latency_ms" field. +func (u *ChannelMonitorHistoryUpsert) ClearPingLatencyMs() *ChannelMonitorHistoryUpsert { + u.SetNull(channelmonitorhistory.FieldPingLatencyMs) + return u +} + +// SetMessage sets the "message" field. 
+func (u *ChannelMonitorHistoryUpsert) SetMessage(v string) *ChannelMonitorHistoryUpsert { + u.Set(channelmonitorhistory.FieldMessage, v) + return u +} + +// UpdateMessage sets the "message" field to the value that was provided on create. +func (u *ChannelMonitorHistoryUpsert) UpdateMessage() *ChannelMonitorHistoryUpsert { + u.SetExcluded(channelmonitorhistory.FieldMessage) + return u +} + +// ClearMessage clears the value of the "message" field. +func (u *ChannelMonitorHistoryUpsert) ClearMessage() *ChannelMonitorHistoryUpsert { + u.SetNull(channelmonitorhistory.FieldMessage) + return u +} + +// SetCheckedAt sets the "checked_at" field. +func (u *ChannelMonitorHistoryUpsert) SetCheckedAt(v time.Time) *ChannelMonitorHistoryUpsert { + u.Set(channelmonitorhistory.FieldCheckedAt, v) + return u +} + +// UpdateCheckedAt sets the "checked_at" field to the value that was provided on create. +func (u *ChannelMonitorHistoryUpsert) UpdateCheckedAt() *ChannelMonitorHistoryUpsert { + u.SetExcluded(channelmonitorhistory.FieldCheckedAt) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.ChannelMonitorHistory.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *ChannelMonitorHistoryUpsertOne) UpdateNewValues() *ChannelMonitorHistoryUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.ChannelMonitorHistory.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *ChannelMonitorHistoryUpsertOne) Ignore() *ChannelMonitorHistoryUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. 
+func (u *ChannelMonitorHistoryUpsertOne) DoNothing() *ChannelMonitorHistoryUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the ChannelMonitorHistoryCreate.OnConflict +// documentation for more info. +func (u *ChannelMonitorHistoryUpsertOne) Update(set func(*ChannelMonitorHistoryUpsert)) *ChannelMonitorHistoryUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&ChannelMonitorHistoryUpsert{UpdateSet: update}) + })) + return u +} + +// SetMonitorID sets the "monitor_id" field. +func (u *ChannelMonitorHistoryUpsertOne) SetMonitorID(v int64) *ChannelMonitorHistoryUpsertOne { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.SetMonitorID(v) + }) +} + +// UpdateMonitorID sets the "monitor_id" field to the value that was provided on create. +func (u *ChannelMonitorHistoryUpsertOne) UpdateMonitorID() *ChannelMonitorHistoryUpsertOne { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.UpdateMonitorID() + }) +} + +// SetModel sets the "model" field. +func (u *ChannelMonitorHistoryUpsertOne) SetModel(v string) *ChannelMonitorHistoryUpsertOne { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.SetModel(v) + }) +} + +// UpdateModel sets the "model" field to the value that was provided on create. +func (u *ChannelMonitorHistoryUpsertOne) UpdateModel() *ChannelMonitorHistoryUpsertOne { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.UpdateModel() + }) +} + +// SetStatus sets the "status" field. +func (u *ChannelMonitorHistoryUpsertOne) SetStatus(v channelmonitorhistory.Status) *ChannelMonitorHistoryUpsertOne { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. 
+func (u *ChannelMonitorHistoryUpsertOne) UpdateStatus() *ChannelMonitorHistoryUpsertOne { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.UpdateStatus() + }) +} + +// SetLatencyMs sets the "latency_ms" field. +func (u *ChannelMonitorHistoryUpsertOne) SetLatencyMs(v int) *ChannelMonitorHistoryUpsertOne { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.SetLatencyMs(v) + }) +} + +// AddLatencyMs adds v to the "latency_ms" field. +func (u *ChannelMonitorHistoryUpsertOne) AddLatencyMs(v int) *ChannelMonitorHistoryUpsertOne { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.AddLatencyMs(v) + }) +} + +// UpdateLatencyMs sets the "latency_ms" field to the value that was provided on create. +func (u *ChannelMonitorHistoryUpsertOne) UpdateLatencyMs() *ChannelMonitorHistoryUpsertOne { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.UpdateLatencyMs() + }) +} + +// ClearLatencyMs clears the value of the "latency_ms" field. +func (u *ChannelMonitorHistoryUpsertOne) ClearLatencyMs() *ChannelMonitorHistoryUpsertOne { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.ClearLatencyMs() + }) +} + +// SetPingLatencyMs sets the "ping_latency_ms" field. +func (u *ChannelMonitorHistoryUpsertOne) SetPingLatencyMs(v int) *ChannelMonitorHistoryUpsertOne { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.SetPingLatencyMs(v) + }) +} + +// AddPingLatencyMs adds v to the "ping_latency_ms" field. +func (u *ChannelMonitorHistoryUpsertOne) AddPingLatencyMs(v int) *ChannelMonitorHistoryUpsertOne { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.AddPingLatencyMs(v) + }) +} + +// UpdatePingLatencyMs sets the "ping_latency_ms" field to the value that was provided on create. 
+func (u *ChannelMonitorHistoryUpsertOne) UpdatePingLatencyMs() *ChannelMonitorHistoryUpsertOne { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.UpdatePingLatencyMs() + }) +} + +// ClearPingLatencyMs clears the value of the "ping_latency_ms" field. +func (u *ChannelMonitorHistoryUpsertOne) ClearPingLatencyMs() *ChannelMonitorHistoryUpsertOne { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.ClearPingLatencyMs() + }) +} + +// SetMessage sets the "message" field. +func (u *ChannelMonitorHistoryUpsertOne) SetMessage(v string) *ChannelMonitorHistoryUpsertOne { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.SetMessage(v) + }) +} + +// UpdateMessage sets the "message" field to the value that was provided on create. +func (u *ChannelMonitorHistoryUpsertOne) UpdateMessage() *ChannelMonitorHistoryUpsertOne { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.UpdateMessage() + }) +} + +// ClearMessage clears the value of the "message" field. +func (u *ChannelMonitorHistoryUpsertOne) ClearMessage() *ChannelMonitorHistoryUpsertOne { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.ClearMessage() + }) +} + +// SetCheckedAt sets the "checked_at" field. +func (u *ChannelMonitorHistoryUpsertOne) SetCheckedAt(v time.Time) *ChannelMonitorHistoryUpsertOne { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.SetCheckedAt(v) + }) +} + +// UpdateCheckedAt sets the "checked_at" field to the value that was provided on create. +func (u *ChannelMonitorHistoryUpsertOne) UpdateCheckedAt() *ChannelMonitorHistoryUpsertOne { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.UpdateCheckedAt() + }) +} + +// Exec executes the query. 
+func (u *ChannelMonitorHistoryUpsertOne) Exec(ctx context.Context) error {
+	if len(u.create.conflict) == 0 {
+		return errors.New("ent: missing options for ChannelMonitorHistoryCreate.OnConflict")
+	}
+	return u.create.Exec(ctx)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (u *ChannelMonitorHistoryUpsertOne) ExecX(ctx context.Context) {
+	if err := u.create.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// ID executes the UPSERT query and returns the inserted/updated ID.
+func (u *ChannelMonitorHistoryUpsertOne) ID(ctx context.Context) (id int64, err error) {
+	node, err := u.create.Save(ctx)
+	if err != nil {
+		return id, err
+	}
+	return node.ID, nil
+}
+
+// IDX is like ID, but panics if an error occurs.
+func (u *ChannelMonitorHistoryUpsertOne) IDX(ctx context.Context) int64 {
+	id, err := u.ID(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return id
+}
+
+// ChannelMonitorHistoryCreateBulk is the builder for creating many ChannelMonitorHistory entities in bulk.
+type ChannelMonitorHistoryCreateBulk struct {
+	config
+	err      error
+	builders []*ChannelMonitorHistoryCreate
+	conflict []sql.ConflictOption
+}
+
+// Save creates the ChannelMonitorHistory entities in the database.
+func (_c *ChannelMonitorHistoryCreateBulk) Save(ctx context.Context) ([]*ChannelMonitorHistory, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*ChannelMonitorHistory, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*ChannelMonitorHistoryMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = _c.conflict + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int64(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (_c *ChannelMonitorHistoryCreateBulk) SaveX(ctx context.Context) []*ChannelMonitorHistory { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. 
+func (_c *ChannelMonitorHistoryCreateBulk) Exec(ctx context.Context) error {
+	_, err := _c.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_c *ChannelMonitorHistoryCreateBulk) ExecX(ctx context.Context) {
+	if err := _c.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
+// of the `INSERT` statement. For example:
+//
+//	client.ChannelMonitorHistory.CreateBulk(builders...).
+//		OnConflict(
+//			// Update the row with the new values
+//			// that were proposed for insertion.
+//			sql.ResolveWithNewValues(),
+//		).
+//		// Override some of the fields with custom
+//		// update values.
+//		Update(func(u *ent.ChannelMonitorHistoryUpsert) {
+//			u.SetMonitorID(v + v)
+//		}).
+//		Exec(ctx)
+func (_c *ChannelMonitorHistoryCreateBulk) OnConflict(opts ...sql.ConflictOption) *ChannelMonitorHistoryUpsertBulk {
+	_c.conflict = opts
+	return &ChannelMonitorHistoryUpsertBulk{
+		create: _c,
+	}
+}
+
+// OnConflictColumns calls `OnConflict` and configures the columns
+// as conflict target. Using this option is equivalent to using:
+//
+//	client.ChannelMonitorHistory.Create().
+//		OnConflict(sql.ConflictColumns(columns...)).
+//		Exec(ctx)
+func (_c *ChannelMonitorHistoryCreateBulk) OnConflictColumns(columns ...string) *ChannelMonitorHistoryUpsertBulk {
+	_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
+	return &ChannelMonitorHistoryUpsertBulk{
+		create: _c,
+	}
+}
+
+// ChannelMonitorHistoryUpsertBulk is the builder for "upsert"-ing
+// a bulk of ChannelMonitorHistory nodes.
+type ChannelMonitorHistoryUpsertBulk struct {
+	create *ChannelMonitorHistoryCreateBulk
+}
+
+// UpdateNewValues updates the mutable fields using the new values that
+// were set on create. Using this option is equivalent to using:
+//
+//	client.ChannelMonitorHistory.Create().
+//		OnConflict(
+//			sql.ResolveWithNewValues(),
+//		).
+// Exec(ctx) +func (u *ChannelMonitorHistoryUpsertBulk) UpdateNewValues() *ChannelMonitorHistoryUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.ChannelMonitorHistory.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *ChannelMonitorHistoryUpsertBulk) Ignore() *ChannelMonitorHistoryUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *ChannelMonitorHistoryUpsertBulk) DoNothing() *ChannelMonitorHistoryUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the ChannelMonitorHistoryCreateBulk.OnConflict +// documentation for more info. +func (u *ChannelMonitorHistoryUpsertBulk) Update(set func(*ChannelMonitorHistoryUpsert)) *ChannelMonitorHistoryUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&ChannelMonitorHistoryUpsert{UpdateSet: update}) + })) + return u +} + +// SetMonitorID sets the "monitor_id" field. +func (u *ChannelMonitorHistoryUpsertBulk) SetMonitorID(v int64) *ChannelMonitorHistoryUpsertBulk { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.SetMonitorID(v) + }) +} + +// UpdateMonitorID sets the "monitor_id" field to the value that was provided on create. +func (u *ChannelMonitorHistoryUpsertBulk) UpdateMonitorID() *ChannelMonitorHistoryUpsertBulk { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.UpdateMonitorID() + }) +} + +// SetModel sets the "model" field. 
+func (u *ChannelMonitorHistoryUpsertBulk) SetModel(v string) *ChannelMonitorHistoryUpsertBulk { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.SetModel(v) + }) +} + +// UpdateModel sets the "model" field to the value that was provided on create. +func (u *ChannelMonitorHistoryUpsertBulk) UpdateModel() *ChannelMonitorHistoryUpsertBulk { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.UpdateModel() + }) +} + +// SetStatus sets the "status" field. +func (u *ChannelMonitorHistoryUpsertBulk) SetStatus(v channelmonitorhistory.Status) *ChannelMonitorHistoryUpsertBulk { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *ChannelMonitorHistoryUpsertBulk) UpdateStatus() *ChannelMonitorHistoryUpsertBulk { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.UpdateStatus() + }) +} + +// SetLatencyMs sets the "latency_ms" field. +func (u *ChannelMonitorHistoryUpsertBulk) SetLatencyMs(v int) *ChannelMonitorHistoryUpsertBulk { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.SetLatencyMs(v) + }) +} + +// AddLatencyMs adds v to the "latency_ms" field. +func (u *ChannelMonitorHistoryUpsertBulk) AddLatencyMs(v int) *ChannelMonitorHistoryUpsertBulk { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.AddLatencyMs(v) + }) +} + +// UpdateLatencyMs sets the "latency_ms" field to the value that was provided on create. +func (u *ChannelMonitorHistoryUpsertBulk) UpdateLatencyMs() *ChannelMonitorHistoryUpsertBulk { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.UpdateLatencyMs() + }) +} + +// ClearLatencyMs clears the value of the "latency_ms" field. +func (u *ChannelMonitorHistoryUpsertBulk) ClearLatencyMs() *ChannelMonitorHistoryUpsertBulk { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.ClearLatencyMs() + }) +} + +// SetPingLatencyMs sets the "ping_latency_ms" field. 
+func (u *ChannelMonitorHistoryUpsertBulk) SetPingLatencyMs(v int) *ChannelMonitorHistoryUpsertBulk { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.SetPingLatencyMs(v) + }) +} + +// AddPingLatencyMs adds v to the "ping_latency_ms" field. +func (u *ChannelMonitorHistoryUpsertBulk) AddPingLatencyMs(v int) *ChannelMonitorHistoryUpsertBulk { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.AddPingLatencyMs(v) + }) +} + +// UpdatePingLatencyMs sets the "ping_latency_ms" field to the value that was provided on create. +func (u *ChannelMonitorHistoryUpsertBulk) UpdatePingLatencyMs() *ChannelMonitorHistoryUpsertBulk { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.UpdatePingLatencyMs() + }) +} + +// ClearPingLatencyMs clears the value of the "ping_latency_ms" field. +func (u *ChannelMonitorHistoryUpsertBulk) ClearPingLatencyMs() *ChannelMonitorHistoryUpsertBulk { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.ClearPingLatencyMs() + }) +} + +// SetMessage sets the "message" field. +func (u *ChannelMonitorHistoryUpsertBulk) SetMessage(v string) *ChannelMonitorHistoryUpsertBulk { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.SetMessage(v) + }) +} + +// UpdateMessage sets the "message" field to the value that was provided on create. +func (u *ChannelMonitorHistoryUpsertBulk) UpdateMessage() *ChannelMonitorHistoryUpsertBulk { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.UpdateMessage() + }) +} + +// ClearMessage clears the value of the "message" field. +func (u *ChannelMonitorHistoryUpsertBulk) ClearMessage() *ChannelMonitorHistoryUpsertBulk { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.ClearMessage() + }) +} + +// SetCheckedAt sets the "checked_at" field. 
+func (u *ChannelMonitorHistoryUpsertBulk) SetCheckedAt(v time.Time) *ChannelMonitorHistoryUpsertBulk { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.SetCheckedAt(v) + }) +} + +// UpdateCheckedAt sets the "checked_at" field to the value that was provided on create. +func (u *ChannelMonitorHistoryUpsertBulk) UpdateCheckedAt() *ChannelMonitorHistoryUpsertBulk { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.UpdateCheckedAt() + }) +} + +// Exec executes the query. +func (u *ChannelMonitorHistoryUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the ChannelMonitorHistoryCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for ChannelMonitorHistoryCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *ChannelMonitorHistoryUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/channelmonitorhistory_delete.go b/backend/ent/channelmonitorhistory_delete.go new file mode 100644 index 00000000..97110e69 --- /dev/null +++ b/backend/ent/channelmonitorhistory_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ChannelMonitorHistoryDelete is the builder for deleting a ChannelMonitorHistory entity. +type ChannelMonitorHistoryDelete struct { + config + hooks []Hook + mutation *ChannelMonitorHistoryMutation +} + +// Where appends a list predicates to the ChannelMonitorHistoryDelete builder. 
+func (_d *ChannelMonitorHistoryDelete) Where(ps ...predicate.ChannelMonitorHistory) *ChannelMonitorHistoryDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *ChannelMonitorHistoryDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *ChannelMonitorHistoryDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *ChannelMonitorHistoryDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(channelmonitorhistory.Table, sqlgraph.NewFieldSpec(channelmonitorhistory.FieldID, field.TypeInt64)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// ChannelMonitorHistoryDeleteOne is the builder for deleting a single ChannelMonitorHistory entity. +type ChannelMonitorHistoryDeleteOne struct { + _d *ChannelMonitorHistoryDelete +} + +// Where appends a list predicates to the ChannelMonitorHistoryDelete builder. +func (_d *ChannelMonitorHistoryDeleteOne) Where(ps ...predicate.ChannelMonitorHistory) *ChannelMonitorHistoryDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *ChannelMonitorHistoryDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{channelmonitorhistory.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (_d *ChannelMonitorHistoryDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/channelmonitorhistory_query.go b/backend/ent/channelmonitorhistory_query.go new file mode 100644 index 00000000..1fb872ad --- /dev/null +++ b/backend/ent/channelmonitorhistory_query.go @@ -0,0 +1,643 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/channelmonitor" + "github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ChannelMonitorHistoryQuery is the builder for querying ChannelMonitorHistory entities. +type ChannelMonitorHistoryQuery struct { + config + ctx *QueryContext + order []channelmonitorhistory.OrderOption + inters []Interceptor + predicates []predicate.ChannelMonitorHistory + withMonitor *ChannelMonitorQuery + modifiers []func(*sql.Selector) + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the ChannelMonitorHistoryQuery builder. +func (_q *ChannelMonitorHistoryQuery) Where(ps ...predicate.ChannelMonitorHistory) *ChannelMonitorHistoryQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *ChannelMonitorHistoryQuery) Limit(limit int) *ChannelMonitorHistoryQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *ChannelMonitorHistoryQuery) Offset(offset int) *ChannelMonitorHistoryQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. 
+func (_q *ChannelMonitorHistoryQuery) Unique(unique bool) *ChannelMonitorHistoryQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *ChannelMonitorHistoryQuery) Order(o ...channelmonitorhistory.OrderOption) *ChannelMonitorHistoryQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryMonitor chains the current query on the "monitor" edge. +func (_q *ChannelMonitorHistoryQuery) QueryMonitor() *ChannelMonitorQuery { + query := (&ChannelMonitorClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(channelmonitorhistory.Table, channelmonitorhistory.FieldID, selector), + sqlgraph.To(channelmonitor.Table, channelmonitor.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, channelmonitorhistory.MonitorTable, channelmonitorhistory.MonitorColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first ChannelMonitorHistory entity from the query. +// Returns a *NotFoundError when no ChannelMonitorHistory was found. +func (_q *ChannelMonitorHistoryQuery) First(ctx context.Context) (*ChannelMonitorHistory, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{channelmonitorhistory.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *ChannelMonitorHistoryQuery) FirstX(ctx context.Context) *ChannelMonitorHistory { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first ChannelMonitorHistory ID from the query. 
+// Returns a *NotFoundError when no ChannelMonitorHistory ID was found. +func (_q *ChannelMonitorHistoryQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{channelmonitorhistory.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *ChannelMonitorHistoryQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single ChannelMonitorHistory entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one ChannelMonitorHistory entity is found. +// Returns a *NotFoundError when no ChannelMonitorHistory entities are found. +func (_q *ChannelMonitorHistoryQuery) Only(ctx context.Context) (*ChannelMonitorHistory, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{channelmonitorhistory.Label} + default: + return nil, &NotSingularError{channelmonitorhistory.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *ChannelMonitorHistoryQuery) OnlyX(ctx context.Context) *ChannelMonitorHistory { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only ChannelMonitorHistory ID in the query. +// Returns a *NotSingularError when more than one ChannelMonitorHistory ID is found. +// Returns a *NotFoundError when no entities are found. 
+func (_q *ChannelMonitorHistoryQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{channelmonitorhistory.Label} + default: + err = &NotSingularError{channelmonitorhistory.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *ChannelMonitorHistoryQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of ChannelMonitorHistories. +func (_q *ChannelMonitorHistoryQuery) All(ctx context.Context) ([]*ChannelMonitorHistory, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*ChannelMonitorHistory, *ChannelMonitorHistoryQuery]() + return withInterceptors[[]*ChannelMonitorHistory](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *ChannelMonitorHistoryQuery) AllX(ctx context.Context) []*ChannelMonitorHistory { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of ChannelMonitorHistory IDs. +func (_q *ChannelMonitorHistoryQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(channelmonitorhistory.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *ChannelMonitorHistoryQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. 
+func (_q *ChannelMonitorHistoryQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*ChannelMonitorHistoryQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *ChannelMonitorHistoryQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *ChannelMonitorHistoryQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *ChannelMonitorHistoryQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the ChannelMonitorHistoryQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *ChannelMonitorHistoryQuery) Clone() *ChannelMonitorHistoryQuery { + if _q == nil { + return nil + } + return &ChannelMonitorHistoryQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]channelmonitorhistory.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.ChannelMonitorHistory{}, _q.predicates...), + withMonitor: _q.withMonitor.Clone(), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// WithMonitor tells the query-builder to eager-load the nodes that are connected to +// the "monitor" edge. 
The optional arguments are used to configure the query builder of the edge. +func (_q *ChannelMonitorHistoryQuery) WithMonitor(opts ...func(*ChannelMonitorQuery)) *ChannelMonitorHistoryQuery { + query := (&ChannelMonitorClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withMonitor = query + return _q +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// MonitorID int64 `json:"monitor_id,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.ChannelMonitorHistory.Query(). +// GroupBy(channelmonitorhistory.FieldMonitorID). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *ChannelMonitorHistoryQuery) GroupBy(field string, fields ...string) *ChannelMonitorHistoryGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &ChannelMonitorHistoryGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = channelmonitorhistory.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// MonitorID int64 `json:"monitor_id,omitempty"` +// } +// +// client.ChannelMonitorHistory.Query(). +// Select(channelmonitorhistory.FieldMonitorID). +// Scan(ctx, &v) +func (_q *ChannelMonitorHistoryQuery) Select(fields ...string) *ChannelMonitorHistorySelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &ChannelMonitorHistorySelect{ChannelMonitorHistoryQuery: _q} + sbuild.label = channelmonitorhistory.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a ChannelMonitorHistorySelect configured with the given aggregations. 
+func (_q *ChannelMonitorHistoryQuery) Aggregate(fns ...AggregateFunc) *ChannelMonitorHistorySelect { + return _q.Select().Aggregate(fns...) +} + +func (_q *ChannelMonitorHistoryQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !channelmonitorhistory.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *ChannelMonitorHistoryQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*ChannelMonitorHistory, error) { + var ( + nodes = []*ChannelMonitorHistory{} + _spec = _q.querySpec() + loadedTypes = [1]bool{ + _q.withMonitor != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*ChannelMonitorHistory).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &ChannelMonitorHistory{config: _q.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := _q.withMonitor; query != nil { + if err := _q.loadMonitor(ctx, query, nodes, nil, + func(n *ChannelMonitorHistory, e *ChannelMonitor) { n.Edges.Monitor = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (_q *ChannelMonitorHistoryQuery) loadMonitor(ctx context.Context, query *ChannelMonitorQuery, nodes 
[]*ChannelMonitorHistory, init func(*ChannelMonitorHistory), assign func(*ChannelMonitorHistory, *ChannelMonitor)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*ChannelMonitorHistory) + for i := range nodes { + fk := nodes[i].MonitorID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(channelmonitor.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "monitor_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (_q *ChannelMonitorHistoryQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *ChannelMonitorHistoryQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(channelmonitorhistory.Table, channelmonitorhistory.Columns, sqlgraph.NewFieldSpec(channelmonitorhistory.FieldID, field.TypeInt64)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, channelmonitorhistory.FieldID) + for i := range fields { + if fields[i] != channelmonitorhistory.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + if _q.withMonitor != nil { + _spec.Node.AddColumnOnce(channelmonitorhistory.FieldMonitorID) + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = 
func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *ChannelMonitorHistoryQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(channelmonitorhistory.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = channelmonitorhistory.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, m := range _q.modifiers { + m(selector) + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (_q *ChannelMonitorHistoryQuery) ForUpdate(opts ...sql.LockOption) *ChannelMonitorHistoryQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. 
Other sessions can read the rows, but cannot modify them +// until your transaction commits. +func (_q *ChannelMonitorHistoryQuery) ForShare(opts ...sql.LockOption) *ChannelMonitorHistoryQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + +// ChannelMonitorHistoryGroupBy is the group-by builder for ChannelMonitorHistory entities. +type ChannelMonitorHistoryGroupBy struct { + selector + build *ChannelMonitorHistoryQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *ChannelMonitorHistoryGroupBy) Aggregate(fns ...AggregateFunc) *ChannelMonitorHistoryGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. +func (_g *ChannelMonitorHistoryGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*ChannelMonitorHistoryQuery, *ChannelMonitorHistoryGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *ChannelMonitorHistoryGroupBy) sqlScan(ctx context.Context, root *ChannelMonitorHistoryQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) 
+ if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// ChannelMonitorHistorySelect is the builder for selecting fields of ChannelMonitorHistory entities. +type ChannelMonitorHistorySelect struct { + *ChannelMonitorHistoryQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *ChannelMonitorHistorySelect) Aggregate(fns ...AggregateFunc) *ChannelMonitorHistorySelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. +func (_s *ChannelMonitorHistorySelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*ChannelMonitorHistoryQuery, *ChannelMonitorHistorySelect](ctx, _s.ChannelMonitorHistoryQuery, _s, _s.inters, v) +} + +func (_s *ChannelMonitorHistorySelect) sqlScan(ctx context.Context, root *ChannelMonitorHistoryQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) 
+ } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/channelmonitorhistory_update.go b/backend/ent/channelmonitorhistory_update.go new file mode 100644 index 00000000..a85a8072 --- /dev/null +++ b/backend/ent/channelmonitorhistory_update.go @@ -0,0 +1,635 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/channelmonitor" + "github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ChannelMonitorHistoryUpdate is the builder for updating ChannelMonitorHistory entities. +type ChannelMonitorHistoryUpdate struct { + config + hooks []Hook + mutation *ChannelMonitorHistoryMutation +} + +// Where appends a list predicates to the ChannelMonitorHistoryUpdate builder. +func (_u *ChannelMonitorHistoryUpdate) Where(ps ...predicate.ChannelMonitorHistory) *ChannelMonitorHistoryUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetMonitorID sets the "monitor_id" field. +func (_u *ChannelMonitorHistoryUpdate) SetMonitorID(v int64) *ChannelMonitorHistoryUpdate { + _u.mutation.SetMonitorID(v) + return _u +} + +// SetNillableMonitorID sets the "monitor_id" field if the given value is not nil. +func (_u *ChannelMonitorHistoryUpdate) SetNillableMonitorID(v *int64) *ChannelMonitorHistoryUpdate { + if v != nil { + _u.SetMonitorID(*v) + } + return _u +} + +// SetModel sets the "model" field. +func (_u *ChannelMonitorHistoryUpdate) SetModel(v string) *ChannelMonitorHistoryUpdate { + _u.mutation.SetModel(v) + return _u +} + +// SetNillableModel sets the "model" field if the given value is not nil. 
+func (_u *ChannelMonitorHistoryUpdate) SetNillableModel(v *string) *ChannelMonitorHistoryUpdate { + if v != nil { + _u.SetModel(*v) + } + return _u +} + +// SetStatus sets the "status" field. +func (_u *ChannelMonitorHistoryUpdate) SetStatus(v channelmonitorhistory.Status) *ChannelMonitorHistoryUpdate { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *ChannelMonitorHistoryUpdate) SetNillableStatus(v *channelmonitorhistory.Status) *ChannelMonitorHistoryUpdate { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetLatencyMs sets the "latency_ms" field. +func (_u *ChannelMonitorHistoryUpdate) SetLatencyMs(v int) *ChannelMonitorHistoryUpdate { + _u.mutation.ResetLatencyMs() + _u.mutation.SetLatencyMs(v) + return _u +} + +// SetNillableLatencyMs sets the "latency_ms" field if the given value is not nil. +func (_u *ChannelMonitorHistoryUpdate) SetNillableLatencyMs(v *int) *ChannelMonitorHistoryUpdate { + if v != nil { + _u.SetLatencyMs(*v) + } + return _u +} + +// AddLatencyMs adds value to the "latency_ms" field. +func (_u *ChannelMonitorHistoryUpdate) AddLatencyMs(v int) *ChannelMonitorHistoryUpdate { + _u.mutation.AddLatencyMs(v) + return _u +} + +// ClearLatencyMs clears the value of the "latency_ms" field. +func (_u *ChannelMonitorHistoryUpdate) ClearLatencyMs() *ChannelMonitorHistoryUpdate { + _u.mutation.ClearLatencyMs() + return _u +} + +// SetPingLatencyMs sets the "ping_latency_ms" field. +func (_u *ChannelMonitorHistoryUpdate) SetPingLatencyMs(v int) *ChannelMonitorHistoryUpdate { + _u.mutation.ResetPingLatencyMs() + _u.mutation.SetPingLatencyMs(v) + return _u +} + +// SetNillablePingLatencyMs sets the "ping_latency_ms" field if the given value is not nil. 
+func (_u *ChannelMonitorHistoryUpdate) SetNillablePingLatencyMs(v *int) *ChannelMonitorHistoryUpdate { + if v != nil { + _u.SetPingLatencyMs(*v) + } + return _u +} + +// AddPingLatencyMs adds value to the "ping_latency_ms" field. +func (_u *ChannelMonitorHistoryUpdate) AddPingLatencyMs(v int) *ChannelMonitorHistoryUpdate { + _u.mutation.AddPingLatencyMs(v) + return _u +} + +// ClearPingLatencyMs clears the value of the "ping_latency_ms" field. +func (_u *ChannelMonitorHistoryUpdate) ClearPingLatencyMs() *ChannelMonitorHistoryUpdate { + _u.mutation.ClearPingLatencyMs() + return _u +} + +// SetMessage sets the "message" field. +func (_u *ChannelMonitorHistoryUpdate) SetMessage(v string) *ChannelMonitorHistoryUpdate { + _u.mutation.SetMessage(v) + return _u +} + +// SetNillableMessage sets the "message" field if the given value is not nil. +func (_u *ChannelMonitorHistoryUpdate) SetNillableMessage(v *string) *ChannelMonitorHistoryUpdate { + if v != nil { + _u.SetMessage(*v) + } + return _u +} + +// ClearMessage clears the value of the "message" field. +func (_u *ChannelMonitorHistoryUpdate) ClearMessage() *ChannelMonitorHistoryUpdate { + _u.mutation.ClearMessage() + return _u +} + +// SetCheckedAt sets the "checked_at" field. +func (_u *ChannelMonitorHistoryUpdate) SetCheckedAt(v time.Time) *ChannelMonitorHistoryUpdate { + _u.mutation.SetCheckedAt(v) + return _u +} + +// SetNillableCheckedAt sets the "checked_at" field if the given value is not nil. +func (_u *ChannelMonitorHistoryUpdate) SetNillableCheckedAt(v *time.Time) *ChannelMonitorHistoryUpdate { + if v != nil { + _u.SetCheckedAt(*v) + } + return _u +} + +// SetMonitor sets the "monitor" edge to the ChannelMonitor entity. +func (_u *ChannelMonitorHistoryUpdate) SetMonitor(v *ChannelMonitor) *ChannelMonitorHistoryUpdate { + return _u.SetMonitorID(v.ID) +} + +// Mutation returns the ChannelMonitorHistoryMutation object of the builder. 
+func (_u *ChannelMonitorHistoryUpdate) Mutation() *ChannelMonitorHistoryMutation { + return _u.mutation +} + +// ClearMonitor clears the "monitor" edge to the ChannelMonitor entity. +func (_u *ChannelMonitorHistoryUpdate) ClearMonitor() *ChannelMonitorHistoryUpdate { + _u.mutation.ClearMonitor() + return _u +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *ChannelMonitorHistoryUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *ChannelMonitorHistoryUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *ChannelMonitorHistoryUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *ChannelMonitorHistoryUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_u *ChannelMonitorHistoryUpdate) check() error { + if v, ok := _u.mutation.Model(); ok { + if err := channelmonitorhistory.ModelValidator(v); err != nil { + return &ValidationError{Name: "model", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorHistory.model": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := channelmonitorhistory.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorHistory.status": %w`, err)} + } + } + if v, ok := _u.mutation.Message(); ok { + if err := channelmonitorhistory.MessageValidator(v); err != nil { + return &ValidationError{Name: "message", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorHistory.message": %w`, err)} + } + } + if _u.mutation.MonitorCleared() && len(_u.mutation.MonitorIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "ChannelMonitorHistory.monitor"`) + } + return nil +} + +func (_u *ChannelMonitorHistoryUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(channelmonitorhistory.Table, channelmonitorhistory.Columns, sqlgraph.NewFieldSpec(channelmonitorhistory.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.Model(); ok { + _spec.SetField(channelmonitorhistory.FieldModel, field.TypeString, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(channelmonitorhistory.FieldStatus, field.TypeEnum, value) + } + if value, ok := _u.mutation.LatencyMs(); ok { + _spec.SetField(channelmonitorhistory.FieldLatencyMs, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedLatencyMs(); ok { + _spec.AddField(channelmonitorhistory.FieldLatencyMs, field.TypeInt, value) + } + if 
_u.mutation.LatencyMsCleared() { + _spec.ClearField(channelmonitorhistory.FieldLatencyMs, field.TypeInt) + } + if value, ok := _u.mutation.PingLatencyMs(); ok { + _spec.SetField(channelmonitorhistory.FieldPingLatencyMs, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedPingLatencyMs(); ok { + _spec.AddField(channelmonitorhistory.FieldPingLatencyMs, field.TypeInt, value) + } + if _u.mutation.PingLatencyMsCleared() { + _spec.ClearField(channelmonitorhistory.FieldPingLatencyMs, field.TypeInt) + } + if value, ok := _u.mutation.Message(); ok { + _spec.SetField(channelmonitorhistory.FieldMessage, field.TypeString, value) + } + if _u.mutation.MessageCleared() { + _spec.ClearField(channelmonitorhistory.FieldMessage, field.TypeString) + } + if value, ok := _u.mutation.CheckedAt(); ok { + _spec.SetField(channelmonitorhistory.FieldCheckedAt, field.TypeTime, value) + } + if _u.mutation.MonitorCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: channelmonitorhistory.MonitorTable, + Columns: []string{channelmonitorhistory.MonitorColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.MonitorIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: channelmonitorhistory.MonitorTable, + Columns: []string{channelmonitorhistory.MonitorColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{channelmonitorhistory.Label} + } else if sqlgraph.IsConstraintError(err) { + 
err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// ChannelMonitorHistoryUpdateOne is the builder for updating a single ChannelMonitorHistory entity. +type ChannelMonitorHistoryUpdateOne struct { + config + fields []string + hooks []Hook + mutation *ChannelMonitorHistoryMutation +} + +// SetMonitorID sets the "monitor_id" field. +func (_u *ChannelMonitorHistoryUpdateOne) SetMonitorID(v int64) *ChannelMonitorHistoryUpdateOne { + _u.mutation.SetMonitorID(v) + return _u +} + +// SetNillableMonitorID sets the "monitor_id" field if the given value is not nil. +func (_u *ChannelMonitorHistoryUpdateOne) SetNillableMonitorID(v *int64) *ChannelMonitorHistoryUpdateOne { + if v != nil { + _u.SetMonitorID(*v) + } + return _u +} + +// SetModel sets the "model" field. +func (_u *ChannelMonitorHistoryUpdateOne) SetModel(v string) *ChannelMonitorHistoryUpdateOne { + _u.mutation.SetModel(v) + return _u +} + +// SetNillableModel sets the "model" field if the given value is not nil. +func (_u *ChannelMonitorHistoryUpdateOne) SetNillableModel(v *string) *ChannelMonitorHistoryUpdateOne { + if v != nil { + _u.SetModel(*v) + } + return _u +} + +// SetStatus sets the "status" field. +func (_u *ChannelMonitorHistoryUpdateOne) SetStatus(v channelmonitorhistory.Status) *ChannelMonitorHistoryUpdateOne { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *ChannelMonitorHistoryUpdateOne) SetNillableStatus(v *channelmonitorhistory.Status) *ChannelMonitorHistoryUpdateOne { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetLatencyMs sets the "latency_ms" field. +func (_u *ChannelMonitorHistoryUpdateOne) SetLatencyMs(v int) *ChannelMonitorHistoryUpdateOne { + _u.mutation.ResetLatencyMs() + _u.mutation.SetLatencyMs(v) + return _u +} + +// SetNillableLatencyMs sets the "latency_ms" field if the given value is not nil. 
+func (_u *ChannelMonitorHistoryUpdateOne) SetNillableLatencyMs(v *int) *ChannelMonitorHistoryUpdateOne { + if v != nil { + _u.SetLatencyMs(*v) + } + return _u +} + +// AddLatencyMs adds value to the "latency_ms" field. +func (_u *ChannelMonitorHistoryUpdateOne) AddLatencyMs(v int) *ChannelMonitorHistoryUpdateOne { + _u.mutation.AddLatencyMs(v) + return _u +} + +// ClearLatencyMs clears the value of the "latency_ms" field. +func (_u *ChannelMonitorHistoryUpdateOne) ClearLatencyMs() *ChannelMonitorHistoryUpdateOne { + _u.mutation.ClearLatencyMs() + return _u +} + +// SetPingLatencyMs sets the "ping_latency_ms" field. +func (_u *ChannelMonitorHistoryUpdateOne) SetPingLatencyMs(v int) *ChannelMonitorHistoryUpdateOne { + _u.mutation.ResetPingLatencyMs() + _u.mutation.SetPingLatencyMs(v) + return _u +} + +// SetNillablePingLatencyMs sets the "ping_latency_ms" field if the given value is not nil. +func (_u *ChannelMonitorHistoryUpdateOne) SetNillablePingLatencyMs(v *int) *ChannelMonitorHistoryUpdateOne { + if v != nil { + _u.SetPingLatencyMs(*v) + } + return _u +} + +// AddPingLatencyMs adds value to the "ping_latency_ms" field. +func (_u *ChannelMonitorHistoryUpdateOne) AddPingLatencyMs(v int) *ChannelMonitorHistoryUpdateOne { + _u.mutation.AddPingLatencyMs(v) + return _u +} + +// ClearPingLatencyMs clears the value of the "ping_latency_ms" field. +func (_u *ChannelMonitorHistoryUpdateOne) ClearPingLatencyMs() *ChannelMonitorHistoryUpdateOne { + _u.mutation.ClearPingLatencyMs() + return _u +} + +// SetMessage sets the "message" field. +func (_u *ChannelMonitorHistoryUpdateOne) SetMessage(v string) *ChannelMonitorHistoryUpdateOne { + _u.mutation.SetMessage(v) + return _u +} + +// SetNillableMessage sets the "message" field if the given value is not nil. +func (_u *ChannelMonitorHistoryUpdateOne) SetNillableMessage(v *string) *ChannelMonitorHistoryUpdateOne { + if v != nil { + _u.SetMessage(*v) + } + return _u +} + +// ClearMessage clears the value of the "message" field. 
+func (_u *ChannelMonitorHistoryUpdateOne) ClearMessage() *ChannelMonitorHistoryUpdateOne { + _u.mutation.ClearMessage() + return _u +} + +// SetCheckedAt sets the "checked_at" field. +func (_u *ChannelMonitorHistoryUpdateOne) SetCheckedAt(v time.Time) *ChannelMonitorHistoryUpdateOne { + _u.mutation.SetCheckedAt(v) + return _u +} + +// SetNillableCheckedAt sets the "checked_at" field if the given value is not nil. +func (_u *ChannelMonitorHistoryUpdateOne) SetNillableCheckedAt(v *time.Time) *ChannelMonitorHistoryUpdateOne { + if v != nil { + _u.SetCheckedAt(*v) + } + return _u +} + +// SetMonitor sets the "monitor" edge to the ChannelMonitor entity. +func (_u *ChannelMonitorHistoryUpdateOne) SetMonitor(v *ChannelMonitor) *ChannelMonitorHistoryUpdateOne { + return _u.SetMonitorID(v.ID) +} + +// Mutation returns the ChannelMonitorHistoryMutation object of the builder. +func (_u *ChannelMonitorHistoryUpdateOne) Mutation() *ChannelMonitorHistoryMutation { + return _u.mutation +} + +// ClearMonitor clears the "monitor" edge to the ChannelMonitor entity. +func (_u *ChannelMonitorHistoryUpdateOne) ClearMonitor() *ChannelMonitorHistoryUpdateOne { + _u.mutation.ClearMonitor() + return _u +} + +// Where appends a list predicates to the ChannelMonitorHistoryUpdate builder. +func (_u *ChannelMonitorHistoryUpdateOne) Where(ps ...predicate.ChannelMonitorHistory) *ChannelMonitorHistoryUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *ChannelMonitorHistoryUpdateOne) Select(field string, fields ...string) *ChannelMonitorHistoryUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated ChannelMonitorHistory entity. 
+func (_u *ChannelMonitorHistoryUpdateOne) Save(ctx context.Context) (*ChannelMonitorHistory, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *ChannelMonitorHistoryUpdateOne) SaveX(ctx context.Context) *ChannelMonitorHistory { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *ChannelMonitorHistoryUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *ChannelMonitorHistoryUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *ChannelMonitorHistoryUpdateOne) check() error { + if v, ok := _u.mutation.Model(); ok { + if err := channelmonitorhistory.ModelValidator(v); err != nil { + return &ValidationError{Name: "model", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorHistory.model": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := channelmonitorhistory.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorHistory.status": %w`, err)} + } + } + if v, ok := _u.mutation.Message(); ok { + if err := channelmonitorhistory.MessageValidator(v); err != nil { + return &ValidationError{Name: "message", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorHistory.message": %w`, err)} + } + } + if _u.mutation.MonitorCleared() && len(_u.mutation.MonitorIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "ChannelMonitorHistory.monitor"`) + } + return nil +} + +func (_u *ChannelMonitorHistoryUpdateOne) sqlSave(ctx context.Context) (_node *ChannelMonitorHistory, err error) { + if err := _u.check(); err != nil { + return _node, err + } + 
_spec := sqlgraph.NewUpdateSpec(channelmonitorhistory.Table, channelmonitorhistory.Columns, sqlgraph.NewFieldSpec(channelmonitorhistory.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "ChannelMonitorHistory.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, channelmonitorhistory.FieldID) + for _, f := range fields { + if !channelmonitorhistory.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != channelmonitorhistory.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.Model(); ok { + _spec.SetField(channelmonitorhistory.FieldModel, field.TypeString, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(channelmonitorhistory.FieldStatus, field.TypeEnum, value) + } + if value, ok := _u.mutation.LatencyMs(); ok { + _spec.SetField(channelmonitorhistory.FieldLatencyMs, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedLatencyMs(); ok { + _spec.AddField(channelmonitorhistory.FieldLatencyMs, field.TypeInt, value) + } + if _u.mutation.LatencyMsCleared() { + _spec.ClearField(channelmonitorhistory.FieldLatencyMs, field.TypeInt) + } + if value, ok := _u.mutation.PingLatencyMs(); ok { + _spec.SetField(channelmonitorhistory.FieldPingLatencyMs, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedPingLatencyMs(); ok { + _spec.AddField(channelmonitorhistory.FieldPingLatencyMs, field.TypeInt, value) + } + if _u.mutation.PingLatencyMsCleared() { + _spec.ClearField(channelmonitorhistory.FieldPingLatencyMs, field.TypeInt) + } + if value, ok 
:= _u.mutation.Message(); ok { + _spec.SetField(channelmonitorhistory.FieldMessage, field.TypeString, value) + } + if _u.mutation.MessageCleared() { + _spec.ClearField(channelmonitorhistory.FieldMessage, field.TypeString) + } + if value, ok := _u.mutation.CheckedAt(); ok { + _spec.SetField(channelmonitorhistory.FieldCheckedAt, field.TypeTime, value) + } + if _u.mutation.MonitorCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: channelmonitorhistory.MonitorTable, + Columns: []string{channelmonitorhistory.MonitorColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.MonitorIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: channelmonitorhistory.MonitorTable, + Columns: []string{channelmonitorhistory.MonitorColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &ChannelMonitorHistory{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{channelmonitorhistory.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/ent/client.go b/backend/ent/client.go index b02f519b..72ef2a36 100644 --- a/backend/ent/client.go +++ b/backend/ent/client.go @@ -22,6 +22,8 @@ import ( "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/authidentity" 
"github.com/Wei-Shaw/sub2api/ent/authidentitychannel" + "github.com/Wei-Shaw/sub2api/ent/channelmonitor" + "github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory" "github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule" "github.com/Wei-Shaw/sub2api/ent/group" "github.com/Wei-Shaw/sub2api/ent/idempotencyrecord" @@ -68,6 +70,10 @@ type Client struct { AuthIdentity *AuthIdentityClient // AuthIdentityChannel is the client for interacting with the AuthIdentityChannel builders. AuthIdentityChannel *AuthIdentityChannelClient + // ChannelMonitor is the client for interacting with the ChannelMonitor builders. + ChannelMonitor *ChannelMonitorClient + // ChannelMonitorHistory is the client for interacting with the ChannelMonitorHistory builders. + ChannelMonitorHistory *ChannelMonitorHistoryClient // ErrorPassthroughRule is the client for interacting with the ErrorPassthroughRule builders. ErrorPassthroughRule *ErrorPassthroughRuleClient // Group is the client for interacting with the Group builders. 
@@ -132,6 +138,8 @@ func (c *Client) init() { c.AnnouncementRead = NewAnnouncementReadClient(c.config) c.AuthIdentity = NewAuthIdentityClient(c.config) c.AuthIdentityChannel = NewAuthIdentityChannelClient(c.config) + c.ChannelMonitor = NewChannelMonitorClient(c.config) + c.ChannelMonitorHistory = NewChannelMonitorHistoryClient(c.config) c.ErrorPassthroughRule = NewErrorPassthroughRuleClient(c.config) c.Group = NewGroupClient(c.config) c.IdempotencyRecord = NewIdempotencyRecordClient(c.config) @@ -254,6 +262,8 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) { AnnouncementRead: NewAnnouncementReadClient(cfg), AuthIdentity: NewAuthIdentityClient(cfg), AuthIdentityChannel: NewAuthIdentityChannelClient(cfg), + ChannelMonitor: NewChannelMonitorClient(cfg), + ChannelMonitorHistory: NewChannelMonitorHistoryClient(cfg), ErrorPassthroughRule: NewErrorPassthroughRuleClient(cfg), Group: NewGroupClient(cfg), IdempotencyRecord: NewIdempotencyRecordClient(cfg), @@ -303,6 +313,8 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) AnnouncementRead: NewAnnouncementReadClient(cfg), AuthIdentity: NewAuthIdentityClient(cfg), AuthIdentityChannel: NewAuthIdentityChannelClient(cfg), + ChannelMonitor: NewChannelMonitorClient(cfg), + ChannelMonitorHistory: NewChannelMonitorHistoryClient(cfg), ErrorPassthroughRule: NewErrorPassthroughRuleClient(cfg), Group: NewGroupClient(cfg), IdempotencyRecord: NewIdempotencyRecordClient(cfg), @@ -356,12 +368,13 @@ func (c *Client) Close() error { func (c *Client) Use(hooks ...Hook) { for _, n := range []interface{ Use(...Hook) }{ c.APIKey, c.Account, c.AccountGroup, c.Announcement, c.AnnouncementRead, - c.AuthIdentity, c.AuthIdentityChannel, c.ErrorPassthroughRule, c.Group, - c.IdempotencyRecord, c.IdentityAdoptionDecision, c.PaymentAuditLog, - c.PaymentOrder, c.PaymentProviderInstance, c.PendingAuthSession, c.PromoCode, - c.PromoCodeUsage, c.Proxy, c.RedeemCode, c.SecuritySecret, c.Setting, - c.SubscriptionPlan, 
c.TLSFingerprintProfile, c.UsageCleanupTask, c.UsageLog, - c.User, c.UserAllowedGroup, c.UserAttributeDefinition, c.UserAttributeValue, + c.AuthIdentity, c.AuthIdentityChannel, c.ChannelMonitor, + c.ChannelMonitorHistory, c.ErrorPassthroughRule, c.Group, c.IdempotencyRecord, + c.IdentityAdoptionDecision, c.PaymentAuditLog, c.PaymentOrder, + c.PaymentProviderInstance, c.PendingAuthSession, c.PromoCode, c.PromoCodeUsage, + c.Proxy, c.RedeemCode, c.SecuritySecret, c.Setting, c.SubscriptionPlan, + c.TLSFingerprintProfile, c.UsageCleanupTask, c.UsageLog, c.User, + c.UserAllowedGroup, c.UserAttributeDefinition, c.UserAttributeValue, c.UserSubscription, } { n.Use(hooks...) @@ -373,12 +386,13 @@ func (c *Client) Use(hooks ...Hook) { func (c *Client) Intercept(interceptors ...Interceptor) { for _, n := range []interface{ Intercept(...Interceptor) }{ c.APIKey, c.Account, c.AccountGroup, c.Announcement, c.AnnouncementRead, - c.AuthIdentity, c.AuthIdentityChannel, c.ErrorPassthroughRule, c.Group, - c.IdempotencyRecord, c.IdentityAdoptionDecision, c.PaymentAuditLog, - c.PaymentOrder, c.PaymentProviderInstance, c.PendingAuthSession, c.PromoCode, - c.PromoCodeUsage, c.Proxy, c.RedeemCode, c.SecuritySecret, c.Setting, - c.SubscriptionPlan, c.TLSFingerprintProfile, c.UsageCleanupTask, c.UsageLog, - c.User, c.UserAllowedGroup, c.UserAttributeDefinition, c.UserAttributeValue, + c.AuthIdentity, c.AuthIdentityChannel, c.ChannelMonitor, + c.ChannelMonitorHistory, c.ErrorPassthroughRule, c.Group, c.IdempotencyRecord, + c.IdentityAdoptionDecision, c.PaymentAuditLog, c.PaymentOrder, + c.PaymentProviderInstance, c.PendingAuthSession, c.PromoCode, c.PromoCodeUsage, + c.Proxy, c.RedeemCode, c.SecuritySecret, c.Setting, c.SubscriptionPlan, + c.TLSFingerprintProfile, c.UsageCleanupTask, c.UsageLog, c.User, + c.UserAllowedGroup, c.UserAttributeDefinition, c.UserAttributeValue, c.UserSubscription, } { n.Intercept(interceptors...) 
@@ -402,6 +416,10 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { return c.AuthIdentity.mutate(ctx, m) case *AuthIdentityChannelMutation: return c.AuthIdentityChannel.mutate(ctx, m) + case *ChannelMonitorMutation: + return c.ChannelMonitor.mutate(ctx, m) + case *ChannelMonitorHistoryMutation: + return c.ChannelMonitorHistory.mutate(ctx, m) case *ErrorPassthroughRuleMutation: return c.ErrorPassthroughRule.mutate(ctx, m) case *GroupMutation: @@ -1595,6 +1613,304 @@ func (c *AuthIdentityChannelClient) mutate(ctx context.Context, m *AuthIdentityC } } +// ChannelMonitorClient is a client for the ChannelMonitor schema. +type ChannelMonitorClient struct { + config +} + +// NewChannelMonitorClient returns a client for the ChannelMonitor from the given config. +func NewChannelMonitorClient(c config) *ChannelMonitorClient { + return &ChannelMonitorClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `channelmonitor.Hooks(f(g(h())))`. +func (c *ChannelMonitorClient) Use(hooks ...Hook) { + c.hooks.ChannelMonitor = append(c.hooks.ChannelMonitor, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `channelmonitor.Intercept(f(g(h())))`. +func (c *ChannelMonitorClient) Intercept(interceptors ...Interceptor) { + c.inters.ChannelMonitor = append(c.inters.ChannelMonitor, interceptors...) +} + +// Create returns a builder for creating a ChannelMonitor entity. +func (c *ChannelMonitorClient) Create() *ChannelMonitorCreate { + mutation := newChannelMonitorMutation(c.config, OpCreate) + return &ChannelMonitorCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of ChannelMonitor entities. 
+func (c *ChannelMonitorClient) CreateBulk(builders ...*ChannelMonitorCreate) *ChannelMonitorCreateBulk { + return &ChannelMonitorCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *ChannelMonitorClient) MapCreateBulk(slice any, setFunc func(*ChannelMonitorCreate, int)) *ChannelMonitorCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &ChannelMonitorCreateBulk{err: fmt.Errorf("calling to ChannelMonitorClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*ChannelMonitorCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &ChannelMonitorCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for ChannelMonitor. +func (c *ChannelMonitorClient) Update() *ChannelMonitorUpdate { + mutation := newChannelMonitorMutation(c.config, OpUpdate) + return &ChannelMonitorUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *ChannelMonitorClient) UpdateOne(_m *ChannelMonitor) *ChannelMonitorUpdateOne { + mutation := newChannelMonitorMutation(c.config, OpUpdateOne, withChannelMonitor(_m)) + return &ChannelMonitorUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *ChannelMonitorClient) UpdateOneID(id int64) *ChannelMonitorUpdateOne { + mutation := newChannelMonitorMutation(c.config, OpUpdateOne, withChannelMonitorID(id)) + return &ChannelMonitorUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for ChannelMonitor. 
+func (c *ChannelMonitorClient) Delete() *ChannelMonitorDelete { + mutation := newChannelMonitorMutation(c.config, OpDelete) + return &ChannelMonitorDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *ChannelMonitorClient) DeleteOne(_m *ChannelMonitor) *ChannelMonitorDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *ChannelMonitorClient) DeleteOneID(id int64) *ChannelMonitorDeleteOne { + builder := c.Delete().Where(channelmonitor.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &ChannelMonitorDeleteOne{builder} +} + +// Query returns a query builder for ChannelMonitor. +func (c *ChannelMonitorClient) Query() *ChannelMonitorQuery { + return &ChannelMonitorQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeChannelMonitor}, + inters: c.Interceptors(), + } +} + +// Get returns a ChannelMonitor entity by its id. +func (c *ChannelMonitorClient) Get(ctx context.Context, id int64) (*ChannelMonitor, error) { + return c.Query().Where(channelmonitor.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *ChannelMonitorClient) GetX(ctx context.Context, id int64) *ChannelMonitor { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryHistory queries the history edge of a ChannelMonitor. 
+func (c *ChannelMonitorClient) QueryHistory(_m *ChannelMonitor) *ChannelMonitorHistoryQuery { + query := (&ChannelMonitorHistoryClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(channelmonitor.Table, channelmonitor.FieldID, id), + sqlgraph.To(channelmonitorhistory.Table, channelmonitorhistory.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, channelmonitor.HistoryTable, channelmonitor.HistoryColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *ChannelMonitorClient) Hooks() []Hook { + return c.hooks.ChannelMonitor +} + +// Interceptors returns the client interceptors. +func (c *ChannelMonitorClient) Interceptors() []Interceptor { + return c.inters.ChannelMonitor +} + +func (c *ChannelMonitorClient) mutate(ctx context.Context, m *ChannelMonitorMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&ChannelMonitorCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&ChannelMonitorUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&ChannelMonitorUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&ChannelMonitorDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown ChannelMonitor mutation op: %q", m.Op()) + } +} + +// ChannelMonitorHistoryClient is a client for the ChannelMonitorHistory schema. +type ChannelMonitorHistoryClient struct { + config +} + +// NewChannelMonitorHistoryClient returns a client for the ChannelMonitorHistory from the given config. 
+func NewChannelMonitorHistoryClient(c config) *ChannelMonitorHistoryClient { + return &ChannelMonitorHistoryClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `channelmonitorhistory.Hooks(f(g(h())))`. +func (c *ChannelMonitorHistoryClient) Use(hooks ...Hook) { + c.hooks.ChannelMonitorHistory = append(c.hooks.ChannelMonitorHistory, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `channelmonitorhistory.Intercept(f(g(h())))`. +func (c *ChannelMonitorHistoryClient) Intercept(interceptors ...Interceptor) { + c.inters.ChannelMonitorHistory = append(c.inters.ChannelMonitorHistory, interceptors...) +} + +// Create returns a builder for creating a ChannelMonitorHistory entity. +func (c *ChannelMonitorHistoryClient) Create() *ChannelMonitorHistoryCreate { + mutation := newChannelMonitorHistoryMutation(c.config, OpCreate) + return &ChannelMonitorHistoryCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of ChannelMonitorHistory entities. +func (c *ChannelMonitorHistoryClient) CreateBulk(builders ...*ChannelMonitorHistoryCreate) *ChannelMonitorHistoryCreateBulk { + return &ChannelMonitorHistoryCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *ChannelMonitorHistoryClient) MapCreateBulk(slice any, setFunc func(*ChannelMonitorHistoryCreate, int)) *ChannelMonitorHistoryCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &ChannelMonitorHistoryCreateBulk{err: fmt.Errorf("calling to ChannelMonitorHistoryClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*ChannelMonitorHistoryCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &ChannelMonitorHistoryCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for ChannelMonitorHistory. +func (c *ChannelMonitorHistoryClient) Update() *ChannelMonitorHistoryUpdate { + mutation := newChannelMonitorHistoryMutation(c.config, OpUpdate) + return &ChannelMonitorHistoryUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *ChannelMonitorHistoryClient) UpdateOne(_m *ChannelMonitorHistory) *ChannelMonitorHistoryUpdateOne { + mutation := newChannelMonitorHistoryMutation(c.config, OpUpdateOne, withChannelMonitorHistory(_m)) + return &ChannelMonitorHistoryUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *ChannelMonitorHistoryClient) UpdateOneID(id int64) *ChannelMonitorHistoryUpdateOne { + mutation := newChannelMonitorHistoryMutation(c.config, OpUpdateOne, withChannelMonitorHistoryID(id)) + return &ChannelMonitorHistoryUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for ChannelMonitorHistory. 
+func (c *ChannelMonitorHistoryClient) Delete() *ChannelMonitorHistoryDelete { + mutation := newChannelMonitorHistoryMutation(c.config, OpDelete) + return &ChannelMonitorHistoryDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *ChannelMonitorHistoryClient) DeleteOne(_m *ChannelMonitorHistory) *ChannelMonitorHistoryDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *ChannelMonitorHistoryClient) DeleteOneID(id int64) *ChannelMonitorHistoryDeleteOne { + builder := c.Delete().Where(channelmonitorhistory.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &ChannelMonitorHistoryDeleteOne{builder} +} + +// Query returns a query builder for ChannelMonitorHistory. +func (c *ChannelMonitorHistoryClient) Query() *ChannelMonitorHistoryQuery { + return &ChannelMonitorHistoryQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeChannelMonitorHistory}, + inters: c.Interceptors(), + } +} + +// Get returns a ChannelMonitorHistory entity by its id. +func (c *ChannelMonitorHistoryClient) Get(ctx context.Context, id int64) (*ChannelMonitorHistory, error) { + return c.Query().Where(channelmonitorhistory.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *ChannelMonitorHistoryClient) GetX(ctx context.Context, id int64) *ChannelMonitorHistory { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryMonitor queries the monitor edge of a ChannelMonitorHistory. 
+func (c *ChannelMonitorHistoryClient) QueryMonitor(_m *ChannelMonitorHistory) *ChannelMonitorQuery { + query := (&ChannelMonitorClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(channelmonitorhistory.Table, channelmonitorhistory.FieldID, id), + sqlgraph.To(channelmonitor.Table, channelmonitor.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, channelmonitorhistory.MonitorTable, channelmonitorhistory.MonitorColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *ChannelMonitorHistoryClient) Hooks() []Hook { + return c.hooks.ChannelMonitorHistory +} + +// Interceptors returns the client interceptors. +func (c *ChannelMonitorHistoryClient) Interceptors() []Interceptor { + return c.inters.ChannelMonitorHistory +} + +func (c *ChannelMonitorHistoryClient) mutate(ctx context.Context, m *ChannelMonitorHistoryMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&ChannelMonitorHistoryCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&ChannelMonitorHistoryUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&ChannelMonitorHistoryUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&ChannelMonitorHistoryDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown ChannelMonitorHistory mutation op: %q", m.Op()) + } +} + // ErrorPassthroughRuleClient is a client for the ErrorPassthroughRule schema. 
type ErrorPassthroughRuleClient struct { config @@ -5355,21 +5671,23 @@ func (c *UserSubscriptionClient) mutate(ctx context.Context, m *UserSubscription type ( hooks struct { APIKey, Account, AccountGroup, Announcement, AnnouncementRead, AuthIdentity, - AuthIdentityChannel, ErrorPassthroughRule, Group, IdempotencyRecord, - IdentityAdoptionDecision, PaymentAuditLog, PaymentOrder, - PaymentProviderInstance, PendingAuthSession, PromoCode, PromoCodeUsage, Proxy, - RedeemCode, SecuritySecret, Setting, SubscriptionPlan, TLSFingerprintProfile, - UsageCleanupTask, UsageLog, User, UserAllowedGroup, UserAttributeDefinition, - UserAttributeValue, UserSubscription []ent.Hook + AuthIdentityChannel, ChannelMonitor, ChannelMonitorHistory, + ErrorPassthroughRule, Group, IdempotencyRecord, IdentityAdoptionDecision, + PaymentAuditLog, PaymentOrder, PaymentProviderInstance, PendingAuthSession, + PromoCode, PromoCodeUsage, Proxy, RedeemCode, SecuritySecret, Setting, + SubscriptionPlan, TLSFingerprintProfile, UsageCleanupTask, UsageLog, User, + UserAllowedGroup, UserAttributeDefinition, UserAttributeValue, + UserSubscription []ent.Hook } inters struct { APIKey, Account, AccountGroup, Announcement, AnnouncementRead, AuthIdentity, - AuthIdentityChannel, ErrorPassthroughRule, Group, IdempotencyRecord, - IdentityAdoptionDecision, PaymentAuditLog, PaymentOrder, - PaymentProviderInstance, PendingAuthSession, PromoCode, PromoCodeUsage, Proxy, - RedeemCode, SecuritySecret, Setting, SubscriptionPlan, TLSFingerprintProfile, - UsageCleanupTask, UsageLog, User, UserAllowedGroup, UserAttributeDefinition, - UserAttributeValue, UserSubscription []ent.Interceptor + AuthIdentityChannel, ChannelMonitor, ChannelMonitorHistory, + ErrorPassthroughRule, Group, IdempotencyRecord, IdentityAdoptionDecision, + PaymentAuditLog, PaymentOrder, PaymentProviderInstance, PendingAuthSession, + PromoCode, PromoCodeUsage, Proxy, RedeemCode, SecuritySecret, Setting, + SubscriptionPlan, TLSFingerprintProfile, 
UsageCleanupTask, UsageLog, User, + UserAllowedGroup, UserAttributeDefinition, UserAttributeValue, + UserSubscription []ent.Interceptor } ) diff --git a/backend/ent/ent.go b/backend/ent/ent.go index 339e5369..e03ea74e 100644 --- a/backend/ent/ent.go +++ b/backend/ent/ent.go @@ -19,6 +19,8 @@ import ( "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/authidentity" "github.com/Wei-Shaw/sub2api/ent/authidentitychannel" + "github.com/Wei-Shaw/sub2api/ent/channelmonitor" + "github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory" "github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule" "github.com/Wei-Shaw/sub2api/ent/group" "github.com/Wei-Shaw/sub2api/ent/idempotencyrecord" @@ -109,6 +111,8 @@ func checkColumn(t, c string) error { announcementread.Table: announcementread.ValidColumn, authidentity.Table: authidentity.ValidColumn, authidentitychannel.Table: authidentitychannel.ValidColumn, + channelmonitor.Table: channelmonitor.ValidColumn, + channelmonitorhistory.Table: channelmonitorhistory.ValidColumn, errorpassthroughrule.Table: errorpassthroughrule.ValidColumn, group.Table: group.ValidColumn, idempotencyrecord.Table: idempotencyrecord.ValidColumn, diff --git a/backend/ent/hook/hook.go b/backend/ent/hook/hook.go index 46ac02bc..e2ffec31 100644 --- a/backend/ent/hook/hook.go +++ b/backend/ent/hook/hook.go @@ -93,6 +93,30 @@ func (f AuthIdentityChannelFunc) Mutate(ctx context.Context, m ent.Mutation) (en return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AuthIdentityChannelMutation", m) } +// The ChannelMonitorFunc type is an adapter to allow the use of ordinary +// function as ChannelMonitor mutator. +type ChannelMonitorFunc func(context.Context, *ent.ChannelMonitorMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). 
+func (f ChannelMonitorFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.ChannelMonitorMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ChannelMonitorMutation", m) +} + +// The ChannelMonitorHistoryFunc type is an adapter to allow the use of ordinary +// function as ChannelMonitorHistory mutator. +type ChannelMonitorHistoryFunc func(context.Context, *ent.ChannelMonitorHistoryMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f ChannelMonitorHistoryFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.ChannelMonitorHistoryMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ChannelMonitorHistoryMutation", m) +} + // The ErrorPassthroughRuleFunc type is an adapter to allow the use of ordinary // function as ErrorPassthroughRule mutator. type ErrorPassthroughRuleFunc func(context.Context, *ent.ErrorPassthroughRuleMutation) (ent.Value, error) diff --git a/backend/ent/intercept/intercept.go b/backend/ent/intercept/intercept.go index 157c5122..1f11755b 100644 --- a/backend/ent/intercept/intercept.go +++ b/backend/ent/intercept/intercept.go @@ -15,6 +15,8 @@ import ( "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/authidentity" "github.com/Wei-Shaw/sub2api/ent/authidentitychannel" + "github.com/Wei-Shaw/sub2api/ent/channelmonitor" + "github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory" "github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule" "github.com/Wei-Shaw/sub2api/ent/group" "github.com/Wei-Shaw/sub2api/ent/idempotencyrecord" @@ -286,6 +288,60 @@ func (f TraverseAuthIdentityChannel) Traverse(ctx context.Context, q ent.Query) return fmt.Errorf("unexpected query type %T. expect *ent.AuthIdentityChannelQuery", q) } +// The ChannelMonitorFunc type is an adapter to allow the use of ordinary function as a Querier. 
+type ChannelMonitorFunc func(context.Context, *ent.ChannelMonitorQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f ChannelMonitorFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.ChannelMonitorQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.ChannelMonitorQuery", q) +} + +// The TraverseChannelMonitor type is an adapter to allow the use of ordinary function as Traverser. +type TraverseChannelMonitor func(context.Context, *ent.ChannelMonitorQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseChannelMonitor) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseChannelMonitor) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.ChannelMonitorQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.ChannelMonitorQuery", q) +} + +// The ChannelMonitorHistoryFunc type is an adapter to allow the use of ordinary function as a Querier. +type ChannelMonitorHistoryFunc func(context.Context, *ent.ChannelMonitorHistoryQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f ChannelMonitorHistoryFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.ChannelMonitorHistoryQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.ChannelMonitorHistoryQuery", q) +} + +// The TraverseChannelMonitorHistory type is an adapter to allow the use of ordinary function as Traverser. +type TraverseChannelMonitorHistory func(context.Context, *ent.ChannelMonitorHistoryQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseChannelMonitorHistory) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). 
+func (f TraverseChannelMonitorHistory) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.ChannelMonitorHistoryQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.ChannelMonitorHistoryQuery", q) +} + // The ErrorPassthroughRuleFunc type is an adapter to allow the use of ordinary function as a Querier. type ErrorPassthroughRuleFunc func(context.Context, *ent.ErrorPassthroughRuleQuery) (ent.Value, error) @@ -924,6 +980,10 @@ func NewQuery(q ent.Query) (Query, error) { return &query[*ent.AuthIdentityQuery, predicate.AuthIdentity, authidentity.OrderOption]{typ: ent.TypeAuthIdentity, tq: q}, nil case *ent.AuthIdentityChannelQuery: return &query[*ent.AuthIdentityChannelQuery, predicate.AuthIdentityChannel, authidentitychannel.OrderOption]{typ: ent.TypeAuthIdentityChannel, tq: q}, nil + case *ent.ChannelMonitorQuery: + return &query[*ent.ChannelMonitorQuery, predicate.ChannelMonitor, channelmonitor.OrderOption]{typ: ent.TypeChannelMonitor, tq: q}, nil + case *ent.ChannelMonitorHistoryQuery: + return &query[*ent.ChannelMonitorHistoryQuery, predicate.ChannelMonitorHistory, channelmonitorhistory.OrderOption]{typ: ent.TypeChannelMonitorHistory, tq: q}, nil case *ent.ErrorPassthroughRuleQuery: return &query[*ent.ErrorPassthroughRuleQuery, predicate.ErrorPassthroughRule, errorpassthroughrule.OrderOption]{typ: ent.TypeErrorPassthroughRule, tq: q}, nil case *ent.GroupQuery: diff --git a/backend/ent/migrate/schema.go b/backend/ent/migrate/schema.go index 40b326a9..3dc17fa2 100644 --- a/backend/ent/migrate/schema.go +++ b/backend/ent/migrate/schema.go @@ -421,6 +421,83 @@ var ( }, }, } + // ChannelMonitorsColumns holds the columns for the "channel_monitors" table. 
+ ChannelMonitorsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "name", Type: field.TypeString, Size: 100}, + {Name: "provider", Type: field.TypeEnum, Enums: []string{"openai", "anthropic", "gemini"}}, + {Name: "endpoint", Type: field.TypeString, Size: 500}, + {Name: "api_key_encrypted", Type: field.TypeString}, + {Name: "primary_model", Type: field.TypeString, Size: 200}, + {Name: "extra_models", Type: field.TypeJSON}, + {Name: "group_name", Type: field.TypeString, Nullable: true, Size: 100, Default: ""}, + {Name: "enabled", Type: field.TypeBool, Default: true}, + {Name: "interval_seconds", Type: field.TypeInt}, + {Name: "last_checked_at", Type: field.TypeTime, Nullable: true}, + {Name: "created_by", Type: field.TypeInt64}, + } + // ChannelMonitorsTable holds the schema information for the "channel_monitors" table. + ChannelMonitorsTable = &schema.Table{ + Name: "channel_monitors", + Columns: ChannelMonitorsColumns, + PrimaryKey: []*schema.Column{ChannelMonitorsColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "channelmonitor_enabled_last_checked_at", + Unique: false, + Columns: []*schema.Column{ChannelMonitorsColumns[10], ChannelMonitorsColumns[12]}, + }, + { + Name: "channelmonitor_provider", + Unique: false, + Columns: []*schema.Column{ChannelMonitorsColumns[4]}, + }, + { + Name: "channelmonitor_group_name", + Unique: false, + Columns: []*schema.Column{ChannelMonitorsColumns[9]}, + }, + }, + } + // ChannelMonitorHistoriesColumns holds the columns for the "channel_monitor_histories" table. 
+ ChannelMonitorHistoriesColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "model", Type: field.TypeString, Size: 200}, + {Name: "status", Type: field.TypeEnum, Enums: []string{"operational", "degraded", "failed", "error"}}, + {Name: "latency_ms", Type: field.TypeInt, Nullable: true}, + {Name: "ping_latency_ms", Type: field.TypeInt, Nullable: true}, + {Name: "message", Type: field.TypeString, Nullable: true, Size: 500, Default: ""}, + {Name: "checked_at", Type: field.TypeTime}, + {Name: "monitor_id", Type: field.TypeInt64}, + } + // ChannelMonitorHistoriesTable holds the schema information for the "channel_monitor_histories" table. + ChannelMonitorHistoriesTable = &schema.Table{ + Name: "channel_monitor_histories", + Columns: ChannelMonitorHistoriesColumns, + PrimaryKey: []*schema.Column{ChannelMonitorHistoriesColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "channel_monitor_histories_channel_monitors_history", + Columns: []*schema.Column{ChannelMonitorHistoriesColumns[7]}, + RefColumns: []*schema.Column{ChannelMonitorsColumns[0]}, + OnDelete: schema.Cascade, + }, + }, + Indexes: []*schema.Index{ + { + Name: "channelmonitorhistory_monitor_id_model_checked_at", + Unique: false, + Columns: []*schema.Column{ChannelMonitorHistoriesColumns[7], ChannelMonitorHistoriesColumns[1], ChannelMonitorHistoriesColumns[6]}, + }, + { + Name: "channelmonitorhistory_checked_at", + Unique: false, + Columns: []*schema.Column{ChannelMonitorHistoriesColumns[6]}, + }, + }, + } // ErrorPassthroughRulesColumns holds the columns for the "error_passthrough_rules" table. 
ErrorPassthroughRulesColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt64, Increment: true}, @@ -1276,7 +1353,7 @@ var ( {Name: "totp_secret_encrypted", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, {Name: "totp_enabled", Type: field.TypeBool, Default: false}, {Name: "totp_enabled_at", Type: field.TypeTime, Nullable: true}, - {Name: "signup_source", Type: field.TypeString, Size: 20, Default: "email"}, + {Name: "signup_source", Type: field.TypeString, Default: "email"}, {Name: "last_login_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, {Name: "last_active_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, {Name: "balance_notify_enabled", Type: field.TypeBool, Default: true}, @@ -1520,6 +1597,8 @@ var ( AnnouncementReadsTable, AuthIdentitiesTable, AuthIdentityChannelsTable, + ChannelMonitorsTable, + ChannelMonitorHistoriesTable, ErrorPassthroughRulesTable, GroupsTable, IdempotencyRecordsTable, @@ -1577,6 +1656,13 @@ func init() { AuthIdentityChannelsTable.Annotation = &entsql.Annotation{ Table: "auth_identity_channels", } + ChannelMonitorsTable.Annotation = &entsql.Annotation{ + Table: "channel_monitors", + } + ChannelMonitorHistoriesTable.ForeignKeys[0].RefTable = ChannelMonitorsTable + ChannelMonitorHistoriesTable.Annotation = &entsql.Annotation{ + Table: "channel_monitor_histories", + } ErrorPassthroughRulesTable.Annotation = &entsql.Annotation{ Table: "error_passthrough_rules", } diff --git a/backend/ent/mutation.go b/backend/ent/mutation.go index ec4a4070..528ace5f 100644 --- a/backend/ent/mutation.go +++ b/backend/ent/mutation.go @@ -19,6 +19,8 @@ import ( "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/authidentity" "github.com/Wei-Shaw/sub2api/ent/authidentitychannel" + "github.com/Wei-Shaw/sub2api/ent/channelmonitor" + "github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory" 
"github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule" "github.com/Wei-Shaw/sub2api/ent/group" "github.com/Wei-Shaw/sub2api/ent/idempotencyrecord" @@ -62,6 +64,8 @@ const ( TypeAnnouncementRead = "AnnouncementRead" TypeAuthIdentity = "AuthIdentity" TypeAuthIdentityChannel = "AuthIdentityChannel" + TypeChannelMonitor = "ChannelMonitor" + TypeChannelMonitorHistory = "ChannelMonitorHistory" TypeErrorPassthroughRule = "ErrorPassthroughRule" TypeGroup = "Group" TypeIdempotencyRecord = "IdempotencyRecord" @@ -8734,6 +8738,2034 @@ func (m *AuthIdentityChannelMutation) ResetEdge(name string) error { return fmt.Errorf("unknown AuthIdentityChannel edge %s", name) } +// ChannelMonitorMutation represents an operation that mutates the ChannelMonitor nodes in the graph. +type ChannelMonitorMutation struct { + config + op Op + typ string + id *int64 + created_at *time.Time + updated_at *time.Time + name *string + provider *channelmonitor.Provider + endpoint *string + api_key_encrypted *string + primary_model *string + extra_models *[]string + appendextra_models []string + group_name *string + enabled *bool + interval_seconds *int + addinterval_seconds *int + last_checked_at *time.Time + created_by *int64 + addcreated_by *int64 + clearedFields map[string]struct{} + history map[int64]struct{} + removedhistory map[int64]struct{} + clearedhistory bool + done bool + oldValue func(context.Context) (*ChannelMonitor, error) + predicates []predicate.ChannelMonitor +} + +var _ ent.Mutation = (*ChannelMonitorMutation)(nil) + +// channelmonitorOption allows management of the mutation configuration using functional options. +type channelmonitorOption func(*ChannelMonitorMutation) + +// newChannelMonitorMutation creates new mutation for the ChannelMonitor entity. 
+func newChannelMonitorMutation(c config, op Op, opts ...channelmonitorOption) *ChannelMonitorMutation { + m := &ChannelMonitorMutation{ + config: c, + op: op, + typ: TypeChannelMonitor, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withChannelMonitorID sets the ID field of the mutation. +func withChannelMonitorID(id int64) channelmonitorOption { + return func(m *ChannelMonitorMutation) { + var ( + err error + once sync.Once + value *ChannelMonitor + ) + m.oldValue = func(ctx context.Context) (*ChannelMonitor, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().ChannelMonitor.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withChannelMonitor sets the old ChannelMonitor of the mutation. +func withChannelMonitor(node *ChannelMonitor) channelmonitorOption { + return func(m *ChannelMonitorMutation) { + m.oldValue = func(context.Context) (*ChannelMonitor, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m ChannelMonitorMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m ChannelMonitorMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. 
+func (m *ChannelMonitorMutation) ID() (id int64, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *ChannelMonitorMutation) IDs(ctx context.Context) ([]int64, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int64{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().ChannelMonitor.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCreatedAt sets the "created_at" field. +func (m *ChannelMonitorMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *ChannelMonitorMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the ChannelMonitor entity. +// If the ChannelMonitor object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *ChannelMonitorMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *ChannelMonitorMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *ChannelMonitorMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *ChannelMonitorMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the ChannelMonitor entity. +// If the ChannelMonitor object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ChannelMonitorMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *ChannelMonitorMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetName sets the "name" field. 
+func (m *ChannelMonitorMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *ChannelMonitorMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the ChannelMonitor entity. +// If the ChannelMonitor object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ChannelMonitorMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName resets all changes to the "name" field. +func (m *ChannelMonitorMutation) ResetName() { + m.name = nil +} + +// SetProvider sets the "provider" field. +func (m *ChannelMonitorMutation) SetProvider(c channelmonitor.Provider) { + m.provider = &c +} + +// Provider returns the value of the "provider" field in the mutation. +func (m *ChannelMonitorMutation) Provider() (r channelmonitor.Provider, exists bool) { + v := m.provider + if v == nil { + return + } + return *v, true +} + +// OldProvider returns the old "provider" field's value of the ChannelMonitor entity. +// If the ChannelMonitor object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *ChannelMonitorMutation) OldProvider(ctx context.Context) (v channelmonitor.Provider, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldProvider is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldProvider requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldProvider: %w", err) + } + return oldValue.Provider, nil +} + +// ResetProvider resets all changes to the "provider" field. +func (m *ChannelMonitorMutation) ResetProvider() { + m.provider = nil +} + +// SetEndpoint sets the "endpoint" field. +func (m *ChannelMonitorMutation) SetEndpoint(s string) { + m.endpoint = &s +} + +// Endpoint returns the value of the "endpoint" field in the mutation. +func (m *ChannelMonitorMutation) Endpoint() (r string, exists bool) { + v := m.endpoint + if v == nil { + return + } + return *v, true +} + +// OldEndpoint returns the old "endpoint" field's value of the ChannelMonitor entity. +// If the ChannelMonitor object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ChannelMonitorMutation) OldEndpoint(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldEndpoint is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldEndpoint requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldEndpoint: %w", err) + } + return oldValue.Endpoint, nil +} + +// ResetEndpoint resets all changes to the "endpoint" field. +func (m *ChannelMonitorMutation) ResetEndpoint() { + m.endpoint = nil +} + +// SetAPIKeyEncrypted sets the "api_key_encrypted" field. 
+func (m *ChannelMonitorMutation) SetAPIKeyEncrypted(s string) { + m.api_key_encrypted = &s +} + +// APIKeyEncrypted returns the value of the "api_key_encrypted" field in the mutation. +func (m *ChannelMonitorMutation) APIKeyEncrypted() (r string, exists bool) { + v := m.api_key_encrypted + if v == nil { + return + } + return *v, true +} + +// OldAPIKeyEncrypted returns the old "api_key_encrypted" field's value of the ChannelMonitor entity. +// If the ChannelMonitor object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ChannelMonitorMutation) OldAPIKeyEncrypted(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAPIKeyEncrypted is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAPIKeyEncrypted requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAPIKeyEncrypted: %w", err) + } + return oldValue.APIKeyEncrypted, nil +} + +// ResetAPIKeyEncrypted resets all changes to the "api_key_encrypted" field. +func (m *ChannelMonitorMutation) ResetAPIKeyEncrypted() { + m.api_key_encrypted = nil +} + +// SetPrimaryModel sets the "primary_model" field. +func (m *ChannelMonitorMutation) SetPrimaryModel(s string) { + m.primary_model = &s +} + +// PrimaryModel returns the value of the "primary_model" field in the mutation. +func (m *ChannelMonitorMutation) PrimaryModel() (r string, exists bool) { + v := m.primary_model + if v == nil { + return + } + return *v, true +} + +// OldPrimaryModel returns the old "primary_model" field's value of the ChannelMonitor entity. +// If the ChannelMonitor object wasn't provided to the builder, the object is fetched from the database. 
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ChannelMonitorMutation) OldPrimaryModel(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPrimaryModel is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPrimaryModel requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPrimaryModel: %w", err) + } + return oldValue.PrimaryModel, nil +} + +// ResetPrimaryModel resets all changes to the "primary_model" field. +func (m *ChannelMonitorMutation) ResetPrimaryModel() { + m.primary_model = nil +} + +// SetExtraModels sets the "extra_models" field. +func (m *ChannelMonitorMutation) SetExtraModels(s []string) { + m.extra_models = &s + m.appendextra_models = nil +} + +// ExtraModels returns the value of the "extra_models" field in the mutation. +func (m *ChannelMonitorMutation) ExtraModels() (r []string, exists bool) { + v := m.extra_models + if v == nil { + return + } + return *v, true +} + +// OldExtraModels returns the old "extra_models" field's value of the ChannelMonitor entity. +// If the ChannelMonitor object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *ChannelMonitorMutation) OldExtraModels(ctx context.Context) (v []string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldExtraModels is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldExtraModels requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldExtraModels: %w", err) + } + return oldValue.ExtraModels, nil +} + +// AppendExtraModels adds s to the "extra_models" field. +func (m *ChannelMonitorMutation) AppendExtraModels(s []string) { + m.appendextra_models = append(m.appendextra_models, s...) +} + +// AppendedExtraModels returns the list of values that were appended to the "extra_models" field in this mutation. +func (m *ChannelMonitorMutation) AppendedExtraModels() ([]string, bool) { + if len(m.appendextra_models) == 0 { + return nil, false + } + return m.appendextra_models, true +} + +// ResetExtraModels resets all changes to the "extra_models" field. +func (m *ChannelMonitorMutation) ResetExtraModels() { + m.extra_models = nil + m.appendextra_models = nil +} + +// SetGroupName sets the "group_name" field. +func (m *ChannelMonitorMutation) SetGroupName(s string) { + m.group_name = &s +} + +// GroupName returns the value of the "group_name" field in the mutation. +func (m *ChannelMonitorMutation) GroupName() (r string, exists bool) { + v := m.group_name + if v == nil { + return + } + return *v, true +} + +// OldGroupName returns the old "group_name" field's value of the ChannelMonitor entity. +// If the ChannelMonitor object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *ChannelMonitorMutation) OldGroupName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldGroupName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldGroupName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldGroupName: %w", err) + } + return oldValue.GroupName, nil +} + +// ClearGroupName clears the value of the "group_name" field. +func (m *ChannelMonitorMutation) ClearGroupName() { + m.group_name = nil + m.clearedFields[channelmonitor.FieldGroupName] = struct{}{} +} + +// GroupNameCleared returns if the "group_name" field was cleared in this mutation. +func (m *ChannelMonitorMutation) GroupNameCleared() bool { + _, ok := m.clearedFields[channelmonitor.FieldGroupName] + return ok +} + +// ResetGroupName resets all changes to the "group_name" field. +func (m *ChannelMonitorMutation) ResetGroupName() { + m.group_name = nil + delete(m.clearedFields, channelmonitor.FieldGroupName) +} + +// SetEnabled sets the "enabled" field. +func (m *ChannelMonitorMutation) SetEnabled(b bool) { + m.enabled = &b +} + +// Enabled returns the value of the "enabled" field in the mutation. +func (m *ChannelMonitorMutation) Enabled() (r bool, exists bool) { + v := m.enabled + if v == nil { + return + } + return *v, true +} + +// OldEnabled returns the old "enabled" field's value of the ChannelMonitor entity. +// If the ChannelMonitor object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *ChannelMonitorMutation) OldEnabled(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldEnabled is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldEnabled requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldEnabled: %w", err) + } + return oldValue.Enabled, nil +} + +// ResetEnabled resets all changes to the "enabled" field. +func (m *ChannelMonitorMutation) ResetEnabled() { + m.enabled = nil +} + +// SetIntervalSeconds sets the "interval_seconds" field. +func (m *ChannelMonitorMutation) SetIntervalSeconds(i int) { + m.interval_seconds = &i + m.addinterval_seconds = nil +} + +// IntervalSeconds returns the value of the "interval_seconds" field in the mutation. +func (m *ChannelMonitorMutation) IntervalSeconds() (r int, exists bool) { + v := m.interval_seconds + if v == nil { + return + } + return *v, true +} + +// OldIntervalSeconds returns the old "interval_seconds" field's value of the ChannelMonitor entity. +// If the ChannelMonitor object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ChannelMonitorMutation) OldIntervalSeconds(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldIntervalSeconds is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldIntervalSeconds requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldIntervalSeconds: %w", err) + } + return oldValue.IntervalSeconds, nil +} + +// AddIntervalSeconds adds i to the "interval_seconds" field. 
+func (m *ChannelMonitorMutation) AddIntervalSeconds(i int) { + if m.addinterval_seconds != nil { + *m.addinterval_seconds += i + } else { + m.addinterval_seconds = &i + } +} + +// AddedIntervalSeconds returns the value that was added to the "interval_seconds" field in this mutation. +func (m *ChannelMonitorMutation) AddedIntervalSeconds() (r int, exists bool) { + v := m.addinterval_seconds + if v == nil { + return + } + return *v, true +} + +// ResetIntervalSeconds resets all changes to the "interval_seconds" field. +func (m *ChannelMonitorMutation) ResetIntervalSeconds() { + m.interval_seconds = nil + m.addinterval_seconds = nil +} + +// SetLastCheckedAt sets the "last_checked_at" field. +func (m *ChannelMonitorMutation) SetLastCheckedAt(t time.Time) { + m.last_checked_at = &t +} + +// LastCheckedAt returns the value of the "last_checked_at" field in the mutation. +func (m *ChannelMonitorMutation) LastCheckedAt() (r time.Time, exists bool) { + v := m.last_checked_at + if v == nil { + return + } + return *v, true +} + +// OldLastCheckedAt returns the old "last_checked_at" field's value of the ChannelMonitor entity. +// If the ChannelMonitor object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ChannelMonitorMutation) OldLastCheckedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldLastCheckedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldLastCheckedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldLastCheckedAt: %w", err) + } + return oldValue.LastCheckedAt, nil +} + +// ClearLastCheckedAt clears the value of the "last_checked_at" field. 
+func (m *ChannelMonitorMutation) ClearLastCheckedAt() { + m.last_checked_at = nil + m.clearedFields[channelmonitor.FieldLastCheckedAt] = struct{}{} +} + +// LastCheckedAtCleared returns if the "last_checked_at" field was cleared in this mutation. +func (m *ChannelMonitorMutation) LastCheckedAtCleared() bool { + _, ok := m.clearedFields[channelmonitor.FieldLastCheckedAt] + return ok +} + +// ResetLastCheckedAt resets all changes to the "last_checked_at" field. +func (m *ChannelMonitorMutation) ResetLastCheckedAt() { + m.last_checked_at = nil + delete(m.clearedFields, channelmonitor.FieldLastCheckedAt) +} + +// SetCreatedBy sets the "created_by" field. +func (m *ChannelMonitorMutation) SetCreatedBy(i int64) { + m.created_by = &i + m.addcreated_by = nil +} + +// CreatedBy returns the value of the "created_by" field in the mutation. +func (m *ChannelMonitorMutation) CreatedBy() (r int64, exists bool) { + v := m.created_by + if v == nil { + return + } + return *v, true +} + +// OldCreatedBy returns the old "created_by" field's value of the ChannelMonitor entity. +// If the ChannelMonitor object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ChannelMonitorMutation) OldCreatedBy(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedBy is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedBy requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedBy: %w", err) + } + return oldValue.CreatedBy, nil +} + +// AddCreatedBy adds i to the "created_by" field. 
+func (m *ChannelMonitorMutation) AddCreatedBy(i int64) { + if m.addcreated_by != nil { + *m.addcreated_by += i + } else { + m.addcreated_by = &i + } +} + +// AddedCreatedBy returns the value that was added to the "created_by" field in this mutation. +func (m *ChannelMonitorMutation) AddedCreatedBy() (r int64, exists bool) { + v := m.addcreated_by + if v == nil { + return + } + return *v, true +} + +// ResetCreatedBy resets all changes to the "created_by" field. +func (m *ChannelMonitorMutation) ResetCreatedBy() { + m.created_by = nil + m.addcreated_by = nil +} + +// AddHistoryIDs adds the "history" edge to the ChannelMonitorHistory entity by ids. +func (m *ChannelMonitorMutation) AddHistoryIDs(ids ...int64) { + if m.history == nil { + m.history = make(map[int64]struct{}) + } + for i := range ids { + m.history[ids[i]] = struct{}{} + } +} + +// ClearHistory clears the "history" edge to the ChannelMonitorHistory entity. +func (m *ChannelMonitorMutation) ClearHistory() { + m.clearedhistory = true +} + +// HistoryCleared reports if the "history" edge to the ChannelMonitorHistory entity was cleared. +func (m *ChannelMonitorMutation) HistoryCleared() bool { + return m.clearedhistory +} + +// RemoveHistoryIDs removes the "history" edge to the ChannelMonitorHistory entity by IDs. +func (m *ChannelMonitorMutation) RemoveHistoryIDs(ids ...int64) { + if m.removedhistory == nil { + m.removedhistory = make(map[int64]struct{}) + } + for i := range ids { + delete(m.history, ids[i]) + m.removedhistory[ids[i]] = struct{}{} + } +} + +// RemovedHistory returns the removed IDs of the "history" edge to the ChannelMonitorHistory entity. +func (m *ChannelMonitorMutation) RemovedHistoryIDs() (ids []int64) { + for id := range m.removedhistory { + ids = append(ids, id) + } + return +} + +// HistoryIDs returns the "history" edge IDs in the mutation. 
+func (m *ChannelMonitorMutation) HistoryIDs() (ids []int64) { + for id := range m.history { + ids = append(ids, id) + } + return +} + +// ResetHistory resets all changes to the "history" edge. +func (m *ChannelMonitorMutation) ResetHistory() { + m.history = nil + m.clearedhistory = false + m.removedhistory = nil +} + +// Where appends a list predicates to the ChannelMonitorMutation builder. +func (m *ChannelMonitorMutation) Where(ps ...predicate.ChannelMonitor) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the ChannelMonitorMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *ChannelMonitorMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.ChannelMonitor, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *ChannelMonitorMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *ChannelMonitorMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (ChannelMonitor). +func (m *ChannelMonitorMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). 
+func (m *ChannelMonitorMutation) Fields() []string { + fields := make([]string, 0, 13) + if m.created_at != nil { + fields = append(fields, channelmonitor.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, channelmonitor.FieldUpdatedAt) + } + if m.name != nil { + fields = append(fields, channelmonitor.FieldName) + } + if m.provider != nil { + fields = append(fields, channelmonitor.FieldProvider) + } + if m.endpoint != nil { + fields = append(fields, channelmonitor.FieldEndpoint) + } + if m.api_key_encrypted != nil { + fields = append(fields, channelmonitor.FieldAPIKeyEncrypted) + } + if m.primary_model != nil { + fields = append(fields, channelmonitor.FieldPrimaryModel) + } + if m.extra_models != nil { + fields = append(fields, channelmonitor.FieldExtraModels) + } + if m.group_name != nil { + fields = append(fields, channelmonitor.FieldGroupName) + } + if m.enabled != nil { + fields = append(fields, channelmonitor.FieldEnabled) + } + if m.interval_seconds != nil { + fields = append(fields, channelmonitor.FieldIntervalSeconds) + } + if m.last_checked_at != nil { + fields = append(fields, channelmonitor.FieldLastCheckedAt) + } + if m.created_by != nil { + fields = append(fields, channelmonitor.FieldCreatedBy) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. 
+func (m *ChannelMonitorMutation) Field(name string) (ent.Value, bool) { + switch name { + case channelmonitor.FieldCreatedAt: + return m.CreatedAt() + case channelmonitor.FieldUpdatedAt: + return m.UpdatedAt() + case channelmonitor.FieldName: + return m.Name() + case channelmonitor.FieldProvider: + return m.Provider() + case channelmonitor.FieldEndpoint: + return m.Endpoint() + case channelmonitor.FieldAPIKeyEncrypted: + return m.APIKeyEncrypted() + case channelmonitor.FieldPrimaryModel: + return m.PrimaryModel() + case channelmonitor.FieldExtraModels: + return m.ExtraModels() + case channelmonitor.FieldGroupName: + return m.GroupName() + case channelmonitor.FieldEnabled: + return m.Enabled() + case channelmonitor.FieldIntervalSeconds: + return m.IntervalSeconds() + case channelmonitor.FieldLastCheckedAt: + return m.LastCheckedAt() + case channelmonitor.FieldCreatedBy: + return m.CreatedBy() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. 
+func (m *ChannelMonitorMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case channelmonitor.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case channelmonitor.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case channelmonitor.FieldName: + return m.OldName(ctx) + case channelmonitor.FieldProvider: + return m.OldProvider(ctx) + case channelmonitor.FieldEndpoint: + return m.OldEndpoint(ctx) + case channelmonitor.FieldAPIKeyEncrypted: + return m.OldAPIKeyEncrypted(ctx) + case channelmonitor.FieldPrimaryModel: + return m.OldPrimaryModel(ctx) + case channelmonitor.FieldExtraModels: + return m.OldExtraModels(ctx) + case channelmonitor.FieldGroupName: + return m.OldGroupName(ctx) + case channelmonitor.FieldEnabled: + return m.OldEnabled(ctx) + case channelmonitor.FieldIntervalSeconds: + return m.OldIntervalSeconds(ctx) + case channelmonitor.FieldLastCheckedAt: + return m.OldLastCheckedAt(ctx) + case channelmonitor.FieldCreatedBy: + return m.OldCreatedBy(ctx) + } + return nil, fmt.Errorf("unknown ChannelMonitor field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *ChannelMonitorMutation) SetField(name string, value ent.Value) error { + switch name { + case channelmonitor.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case channelmonitor.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case channelmonitor.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case channelmonitor.FieldProvider: + v, ok := value.(channelmonitor.Provider) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetProvider(v) + return nil + case channelmonitor.FieldEndpoint: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetEndpoint(v) + return nil + case channelmonitor.FieldAPIKeyEncrypted: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAPIKeyEncrypted(v) + return nil + case channelmonitor.FieldPrimaryModel: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPrimaryModel(v) + return nil + case channelmonitor.FieldExtraModels: + v, ok := value.([]string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetExtraModels(v) + return nil + case channelmonitor.FieldGroupName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetGroupName(v) + return nil + case channelmonitor.FieldEnabled: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetEnabled(v) + return nil + case channelmonitor.FieldIntervalSeconds: + v, ok := value.(int) + if !ok 
{ + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetIntervalSeconds(v) + return nil + case channelmonitor.FieldLastCheckedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetLastCheckedAt(v) + return nil + case channelmonitor.FieldCreatedBy: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedBy(v) + return nil + } + return fmt.Errorf("unknown ChannelMonitor field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *ChannelMonitorMutation) AddedFields() []string { + var fields []string + if m.addinterval_seconds != nil { + fields = append(fields, channelmonitor.FieldIntervalSeconds) + } + if m.addcreated_by != nil { + fields = append(fields, channelmonitor.FieldCreatedBy) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *ChannelMonitorMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case channelmonitor.FieldIntervalSeconds: + return m.AddedIntervalSeconds() + case channelmonitor.FieldCreatedBy: + return m.AddedCreatedBy() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *ChannelMonitorMutation) AddField(name string, value ent.Value) error { + switch name { + case channelmonitor.FieldIntervalSeconds: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddIntervalSeconds(v) + return nil + case channelmonitor.FieldCreatedBy: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddCreatedBy(v) + return nil + } + return fmt.Errorf("unknown ChannelMonitor numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *ChannelMonitorMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(channelmonitor.FieldGroupName) { + fields = append(fields, channelmonitor.FieldGroupName) + } + if m.FieldCleared(channelmonitor.FieldLastCheckedAt) { + fields = append(fields, channelmonitor.FieldLastCheckedAt) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *ChannelMonitorMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *ChannelMonitorMutation) ClearField(name string) error { + switch name { + case channelmonitor.FieldGroupName: + m.ClearGroupName() + return nil + case channelmonitor.FieldLastCheckedAt: + m.ClearLastCheckedAt() + return nil + } + return fmt.Errorf("unknown ChannelMonitor nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. 
+func (m *ChannelMonitorMutation) ResetField(name string) error { + switch name { + case channelmonitor.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case channelmonitor.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case channelmonitor.FieldName: + m.ResetName() + return nil + case channelmonitor.FieldProvider: + m.ResetProvider() + return nil + case channelmonitor.FieldEndpoint: + m.ResetEndpoint() + return nil + case channelmonitor.FieldAPIKeyEncrypted: + m.ResetAPIKeyEncrypted() + return nil + case channelmonitor.FieldPrimaryModel: + m.ResetPrimaryModel() + return nil + case channelmonitor.FieldExtraModels: + m.ResetExtraModels() + return nil + case channelmonitor.FieldGroupName: + m.ResetGroupName() + return nil + case channelmonitor.FieldEnabled: + m.ResetEnabled() + return nil + case channelmonitor.FieldIntervalSeconds: + m.ResetIntervalSeconds() + return nil + case channelmonitor.FieldLastCheckedAt: + m.ResetLastCheckedAt() + return nil + case channelmonitor.FieldCreatedBy: + m.ResetCreatedBy() + return nil + } + return fmt.Errorf("unknown ChannelMonitor field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *ChannelMonitorMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.history != nil { + edges = append(edges, channelmonitor.EdgeHistory) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *ChannelMonitorMutation) AddedIDs(name string) []ent.Value { + switch name { + case channelmonitor.EdgeHistory: + ids := make([]ent.Value, 0, len(m.history)) + for id := range m.history { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. 
+func (m *ChannelMonitorMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + if m.removedhistory != nil { + edges = append(edges, channelmonitor.EdgeHistory) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *ChannelMonitorMutation) RemovedIDs(name string) []ent.Value { + switch name { + case channelmonitor.EdgeHistory: + ids := make([]ent.Value, 0, len(m.removedhistory)) + for id := range m.removedhistory { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *ChannelMonitorMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedhistory { + edges = append(edges, channelmonitor.EdgeHistory) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *ChannelMonitorMutation) EdgeCleared(name string) bool { + switch name { + case channelmonitor.EdgeHistory: + return m.clearedhistory + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *ChannelMonitorMutation) ClearEdge(name string) error { + switch name { + } + return fmt.Errorf("unknown ChannelMonitor unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *ChannelMonitorMutation) ResetEdge(name string) error { + switch name { + case channelmonitor.EdgeHistory: + m.ResetHistory() + return nil + } + return fmt.Errorf("unknown ChannelMonitor edge %s", name) +} + +// ChannelMonitorHistoryMutation represents an operation that mutates the ChannelMonitorHistory nodes in the graph. 
+type ChannelMonitorHistoryMutation struct { + config + op Op + typ string + id *int64 + model *string + status *channelmonitorhistory.Status + latency_ms *int + addlatency_ms *int + ping_latency_ms *int + addping_latency_ms *int + message *string + checked_at *time.Time + clearedFields map[string]struct{} + monitor *int64 + clearedmonitor bool + done bool + oldValue func(context.Context) (*ChannelMonitorHistory, error) + predicates []predicate.ChannelMonitorHistory +} + +var _ ent.Mutation = (*ChannelMonitorHistoryMutation)(nil) + +// channelmonitorhistoryOption allows management of the mutation configuration using functional options. +type channelmonitorhistoryOption func(*ChannelMonitorHistoryMutation) + +// newChannelMonitorHistoryMutation creates new mutation for the ChannelMonitorHistory entity. +func newChannelMonitorHistoryMutation(c config, op Op, opts ...channelmonitorhistoryOption) *ChannelMonitorHistoryMutation { + m := &ChannelMonitorHistoryMutation{ + config: c, + op: op, + typ: TypeChannelMonitorHistory, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withChannelMonitorHistoryID sets the ID field of the mutation. +func withChannelMonitorHistoryID(id int64) channelmonitorhistoryOption { + return func(m *ChannelMonitorHistoryMutation) { + var ( + err error + once sync.Once + value *ChannelMonitorHistory + ) + m.oldValue = func(ctx context.Context) (*ChannelMonitorHistory, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().ChannelMonitorHistory.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withChannelMonitorHistory sets the old ChannelMonitorHistory of the mutation. 
+func withChannelMonitorHistory(node *ChannelMonitorHistory) channelmonitorhistoryOption { + return func(m *ChannelMonitorHistoryMutation) { + m.oldValue = func(context.Context) (*ChannelMonitorHistory, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m ChannelMonitorHistoryMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m ChannelMonitorHistoryMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *ChannelMonitorHistoryMutation) ID() (id int64, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *ChannelMonitorHistoryMutation) IDs(ctx context.Context) ([]int64, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int64{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().ChannelMonitorHistory.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetMonitorID sets the "monitor_id" field. 
+func (m *ChannelMonitorHistoryMutation) SetMonitorID(i int64) { + m.monitor = &i +} + +// MonitorID returns the value of the "monitor_id" field in the mutation. +func (m *ChannelMonitorHistoryMutation) MonitorID() (r int64, exists bool) { + v := m.monitor + if v == nil { + return + } + return *v, true +} + +// OldMonitorID returns the old "monitor_id" field's value of the ChannelMonitorHistory entity. +// If the ChannelMonitorHistory object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ChannelMonitorHistoryMutation) OldMonitorID(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldMonitorID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldMonitorID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldMonitorID: %w", err) + } + return oldValue.MonitorID, nil +} + +// ResetMonitorID resets all changes to the "monitor_id" field. +func (m *ChannelMonitorHistoryMutation) ResetMonitorID() { + m.monitor = nil +} + +// SetModel sets the "model" field. +func (m *ChannelMonitorHistoryMutation) SetModel(s string) { + m.model = &s +} + +// Model returns the value of the "model" field in the mutation. +func (m *ChannelMonitorHistoryMutation) Model() (r string, exists bool) { + v := m.model + if v == nil { + return + } + return *v, true +} + +// OldModel returns the old "model" field's value of the ChannelMonitorHistory entity. +// If the ChannelMonitorHistory object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *ChannelMonitorHistoryMutation) OldModel(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldModel is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldModel requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldModel: %w", err) + } + return oldValue.Model, nil +} + +// ResetModel resets all changes to the "model" field. +func (m *ChannelMonitorHistoryMutation) ResetModel() { + m.model = nil +} + +// SetStatus sets the "status" field. +func (m *ChannelMonitorHistoryMutation) SetStatus(c channelmonitorhistory.Status) { + m.status = &c +} + +// Status returns the value of the "status" field in the mutation. +func (m *ChannelMonitorHistoryMutation) Status() (r channelmonitorhistory.Status, exists bool) { + v := m.status + if v == nil { + return + } + return *v, true +} + +// OldStatus returns the old "status" field's value of the ChannelMonitorHistory entity. +// If the ChannelMonitorHistory object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ChannelMonitorHistoryMutation) OldStatus(ctx context.Context) (v channelmonitorhistory.Status, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStatus: %w", err) + } + return oldValue.Status, nil +} + +// ResetStatus resets all changes to the "status" field. +func (m *ChannelMonitorHistoryMutation) ResetStatus() { + m.status = nil +} + +// SetLatencyMs sets the "latency_ms" field. 
+func (m *ChannelMonitorHistoryMutation) SetLatencyMs(i int) { + m.latency_ms = &i + m.addlatency_ms = nil +} + +// LatencyMs returns the value of the "latency_ms" field in the mutation. +func (m *ChannelMonitorHistoryMutation) LatencyMs() (r int, exists bool) { + v := m.latency_ms + if v == nil { + return + } + return *v, true +} + +// OldLatencyMs returns the old "latency_ms" field's value of the ChannelMonitorHistory entity. +// If the ChannelMonitorHistory object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ChannelMonitorHistoryMutation) OldLatencyMs(ctx context.Context) (v *int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldLatencyMs is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldLatencyMs requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldLatencyMs: %w", err) + } + return oldValue.LatencyMs, nil +} + +// AddLatencyMs adds i to the "latency_ms" field. +func (m *ChannelMonitorHistoryMutation) AddLatencyMs(i int) { + if m.addlatency_ms != nil { + *m.addlatency_ms += i + } else { + m.addlatency_ms = &i + } +} + +// AddedLatencyMs returns the value that was added to the "latency_ms" field in this mutation. +func (m *ChannelMonitorHistoryMutation) AddedLatencyMs() (r int, exists bool) { + v := m.addlatency_ms + if v == nil { + return + } + return *v, true +} + +// ClearLatencyMs clears the value of the "latency_ms" field. +func (m *ChannelMonitorHistoryMutation) ClearLatencyMs() { + m.latency_ms = nil + m.addlatency_ms = nil + m.clearedFields[channelmonitorhistory.FieldLatencyMs] = struct{}{} +} + +// LatencyMsCleared returns if the "latency_ms" field was cleared in this mutation. 
+func (m *ChannelMonitorHistoryMutation) LatencyMsCleared() bool { + _, ok := m.clearedFields[channelmonitorhistory.FieldLatencyMs] + return ok +} + +// ResetLatencyMs resets all changes to the "latency_ms" field. +func (m *ChannelMonitorHistoryMutation) ResetLatencyMs() { + m.latency_ms = nil + m.addlatency_ms = nil + delete(m.clearedFields, channelmonitorhistory.FieldLatencyMs) +} + +// SetPingLatencyMs sets the "ping_latency_ms" field. +func (m *ChannelMonitorHistoryMutation) SetPingLatencyMs(i int) { + m.ping_latency_ms = &i + m.addping_latency_ms = nil +} + +// PingLatencyMs returns the value of the "ping_latency_ms" field in the mutation. +func (m *ChannelMonitorHistoryMutation) PingLatencyMs() (r int, exists bool) { + v := m.ping_latency_ms + if v == nil { + return + } + return *v, true +} + +// OldPingLatencyMs returns the old "ping_latency_ms" field's value of the ChannelMonitorHistory entity. +// If the ChannelMonitorHistory object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ChannelMonitorHistoryMutation) OldPingLatencyMs(ctx context.Context) (v *int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPingLatencyMs is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPingLatencyMs requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPingLatencyMs: %w", err) + } + return oldValue.PingLatencyMs, nil +} + +// AddPingLatencyMs adds i to the "ping_latency_ms" field. +func (m *ChannelMonitorHistoryMutation) AddPingLatencyMs(i int) { + if m.addping_latency_ms != nil { + *m.addping_latency_ms += i + } else { + m.addping_latency_ms = &i + } +} + +// AddedPingLatencyMs returns the value that was added to the "ping_latency_ms" field in this mutation. 
+func (m *ChannelMonitorHistoryMutation) AddedPingLatencyMs() (r int, exists bool) { + v := m.addping_latency_ms + if v == nil { + return + } + return *v, true +} + +// ClearPingLatencyMs clears the value of the "ping_latency_ms" field. +func (m *ChannelMonitorHistoryMutation) ClearPingLatencyMs() { + m.ping_latency_ms = nil + m.addping_latency_ms = nil + m.clearedFields[channelmonitorhistory.FieldPingLatencyMs] = struct{}{} +} + +// PingLatencyMsCleared returns if the "ping_latency_ms" field was cleared in this mutation. +func (m *ChannelMonitorHistoryMutation) PingLatencyMsCleared() bool { + _, ok := m.clearedFields[channelmonitorhistory.FieldPingLatencyMs] + return ok +} + +// ResetPingLatencyMs resets all changes to the "ping_latency_ms" field. +func (m *ChannelMonitorHistoryMutation) ResetPingLatencyMs() { + m.ping_latency_ms = nil + m.addping_latency_ms = nil + delete(m.clearedFields, channelmonitorhistory.FieldPingLatencyMs) +} + +// SetMessage sets the "message" field. +func (m *ChannelMonitorHistoryMutation) SetMessage(s string) { + m.message = &s +} + +// Message returns the value of the "message" field in the mutation. +func (m *ChannelMonitorHistoryMutation) Message() (r string, exists bool) { + v := m.message + if v == nil { + return + } + return *v, true +} + +// OldMessage returns the old "message" field's value of the ChannelMonitorHistory entity. +// If the ChannelMonitorHistory object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *ChannelMonitorHistoryMutation) OldMessage(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldMessage is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldMessage requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldMessage: %w", err) + } + return oldValue.Message, nil +} + +// ClearMessage clears the value of the "message" field. +func (m *ChannelMonitorHistoryMutation) ClearMessage() { + m.message = nil + m.clearedFields[channelmonitorhistory.FieldMessage] = struct{}{} +} + +// MessageCleared returns if the "message" field was cleared in this mutation. +func (m *ChannelMonitorHistoryMutation) MessageCleared() bool { + _, ok := m.clearedFields[channelmonitorhistory.FieldMessage] + return ok +} + +// ResetMessage resets all changes to the "message" field. +func (m *ChannelMonitorHistoryMutation) ResetMessage() { + m.message = nil + delete(m.clearedFields, channelmonitorhistory.FieldMessage) +} + +// SetCheckedAt sets the "checked_at" field. +func (m *ChannelMonitorHistoryMutation) SetCheckedAt(t time.Time) { + m.checked_at = &t +} + +// CheckedAt returns the value of the "checked_at" field in the mutation. +func (m *ChannelMonitorHistoryMutation) CheckedAt() (r time.Time, exists bool) { + v := m.checked_at + if v == nil { + return + } + return *v, true +} + +// OldCheckedAt returns the old "checked_at" field's value of the ChannelMonitorHistory entity. +// If the ChannelMonitorHistory object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *ChannelMonitorHistoryMutation) OldCheckedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCheckedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCheckedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCheckedAt: %w", err) + } + return oldValue.CheckedAt, nil +} + +// ResetCheckedAt resets all changes to the "checked_at" field. +func (m *ChannelMonitorHistoryMutation) ResetCheckedAt() { + m.checked_at = nil +} + +// ClearMonitor clears the "monitor" edge to the ChannelMonitor entity. +func (m *ChannelMonitorHistoryMutation) ClearMonitor() { + m.clearedmonitor = true + m.clearedFields[channelmonitorhistory.FieldMonitorID] = struct{}{} +} + +// MonitorCleared reports if the "monitor" edge to the ChannelMonitor entity was cleared. +func (m *ChannelMonitorHistoryMutation) MonitorCleared() bool { + return m.clearedmonitor +} + +// MonitorIDs returns the "monitor" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// MonitorID instead. It exists only for internal usage by the builders. +func (m *ChannelMonitorHistoryMutation) MonitorIDs() (ids []int64) { + if id := m.monitor; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetMonitor resets all changes to the "monitor" edge. +func (m *ChannelMonitorHistoryMutation) ResetMonitor() { + m.monitor = nil + m.clearedmonitor = false +} + +// Where appends a list predicates to the ChannelMonitorHistoryMutation builder. +func (m *ChannelMonitorHistoryMutation) Where(ps ...predicate.ChannelMonitorHistory) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the ChannelMonitorHistoryMutation builder. 
Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *ChannelMonitorHistoryMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.ChannelMonitorHistory, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *ChannelMonitorHistoryMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *ChannelMonitorHistoryMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (ChannelMonitorHistory). +func (m *ChannelMonitorHistoryMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *ChannelMonitorHistoryMutation) Fields() []string { + fields := make([]string, 0, 7) + if m.monitor != nil { + fields = append(fields, channelmonitorhistory.FieldMonitorID) + } + if m.model != nil { + fields = append(fields, channelmonitorhistory.FieldModel) + } + if m.status != nil { + fields = append(fields, channelmonitorhistory.FieldStatus) + } + if m.latency_ms != nil { + fields = append(fields, channelmonitorhistory.FieldLatencyMs) + } + if m.ping_latency_ms != nil { + fields = append(fields, channelmonitorhistory.FieldPingLatencyMs) + } + if m.message != nil { + fields = append(fields, channelmonitorhistory.FieldMessage) + } + if m.checked_at != nil { + fields = append(fields, channelmonitorhistory.FieldCheckedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. 
+func (m *ChannelMonitorHistoryMutation) Field(name string) (ent.Value, bool) { + switch name { + case channelmonitorhistory.FieldMonitorID: + return m.MonitorID() + case channelmonitorhistory.FieldModel: + return m.Model() + case channelmonitorhistory.FieldStatus: + return m.Status() + case channelmonitorhistory.FieldLatencyMs: + return m.LatencyMs() + case channelmonitorhistory.FieldPingLatencyMs: + return m.PingLatencyMs() + case channelmonitorhistory.FieldMessage: + return m.Message() + case channelmonitorhistory.FieldCheckedAt: + return m.CheckedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *ChannelMonitorHistoryMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case channelmonitorhistory.FieldMonitorID: + return m.OldMonitorID(ctx) + case channelmonitorhistory.FieldModel: + return m.OldModel(ctx) + case channelmonitorhistory.FieldStatus: + return m.OldStatus(ctx) + case channelmonitorhistory.FieldLatencyMs: + return m.OldLatencyMs(ctx) + case channelmonitorhistory.FieldPingLatencyMs: + return m.OldPingLatencyMs(ctx) + case channelmonitorhistory.FieldMessage: + return m.OldMessage(ctx) + case channelmonitorhistory.FieldCheckedAt: + return m.OldCheckedAt(ctx) + } + return nil, fmt.Errorf("unknown ChannelMonitorHistory field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *ChannelMonitorHistoryMutation) SetField(name string, value ent.Value) error { + switch name { + case channelmonitorhistory.FieldMonitorID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetMonitorID(v) + return nil + case channelmonitorhistory.FieldModel: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetModel(v) + return nil + case channelmonitorhistory.FieldStatus: + v, ok := value.(channelmonitorhistory.Status) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStatus(v) + return nil + case channelmonitorhistory.FieldLatencyMs: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetLatencyMs(v) + return nil + case channelmonitorhistory.FieldPingLatencyMs: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPingLatencyMs(v) + return nil + case channelmonitorhistory.FieldMessage: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetMessage(v) + return nil + case channelmonitorhistory.FieldCheckedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCheckedAt(v) + return nil + } + return fmt.Errorf("unknown ChannelMonitorHistory field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. 
+func (m *ChannelMonitorHistoryMutation) AddedFields() []string { + var fields []string + if m.addlatency_ms != nil { + fields = append(fields, channelmonitorhistory.FieldLatencyMs) + } + if m.addping_latency_ms != nil { + fields = append(fields, channelmonitorhistory.FieldPingLatencyMs) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *ChannelMonitorHistoryMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case channelmonitorhistory.FieldLatencyMs: + return m.AddedLatencyMs() + case channelmonitorhistory.FieldPingLatencyMs: + return m.AddedPingLatencyMs() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *ChannelMonitorHistoryMutation) AddField(name string, value ent.Value) error { + switch name { + case channelmonitorhistory.FieldLatencyMs: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddLatencyMs(v) + return nil + case channelmonitorhistory.FieldPingLatencyMs: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddPingLatencyMs(v) + return nil + } + return fmt.Errorf("unknown ChannelMonitorHistory numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. 
+func (m *ChannelMonitorHistoryMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(channelmonitorhistory.FieldLatencyMs) { + fields = append(fields, channelmonitorhistory.FieldLatencyMs) + } + if m.FieldCleared(channelmonitorhistory.FieldPingLatencyMs) { + fields = append(fields, channelmonitorhistory.FieldPingLatencyMs) + } + if m.FieldCleared(channelmonitorhistory.FieldMessage) { + fields = append(fields, channelmonitorhistory.FieldMessage) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *ChannelMonitorHistoryMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *ChannelMonitorHistoryMutation) ClearField(name string) error { + switch name { + case channelmonitorhistory.FieldLatencyMs: + m.ClearLatencyMs() + return nil + case channelmonitorhistory.FieldPingLatencyMs: + m.ClearPingLatencyMs() + return nil + case channelmonitorhistory.FieldMessage: + m.ClearMessage() + return nil + } + return fmt.Errorf("unknown ChannelMonitorHistory nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. 
+func (m *ChannelMonitorHistoryMutation) ResetField(name string) error { + switch name { + case channelmonitorhistory.FieldMonitorID: + m.ResetMonitorID() + return nil + case channelmonitorhistory.FieldModel: + m.ResetModel() + return nil + case channelmonitorhistory.FieldStatus: + m.ResetStatus() + return nil + case channelmonitorhistory.FieldLatencyMs: + m.ResetLatencyMs() + return nil + case channelmonitorhistory.FieldPingLatencyMs: + m.ResetPingLatencyMs() + return nil + case channelmonitorhistory.FieldMessage: + m.ResetMessage() + return nil + case channelmonitorhistory.FieldCheckedAt: + m.ResetCheckedAt() + return nil + } + return fmt.Errorf("unknown ChannelMonitorHistory field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *ChannelMonitorHistoryMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.monitor != nil { + edges = append(edges, channelmonitorhistory.EdgeMonitor) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *ChannelMonitorHistoryMutation) AddedIDs(name string) []ent.Value { + switch name { + case channelmonitorhistory.EdgeMonitor: + if id := m.monitor; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *ChannelMonitorHistoryMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *ChannelMonitorHistoryMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. 
+func (m *ChannelMonitorHistoryMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedmonitor { + edges = append(edges, channelmonitorhistory.EdgeMonitor) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *ChannelMonitorHistoryMutation) EdgeCleared(name string) bool { + switch name { + case channelmonitorhistory.EdgeMonitor: + return m.clearedmonitor + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *ChannelMonitorHistoryMutation) ClearEdge(name string) error { + switch name { + case channelmonitorhistory.EdgeMonitor: + m.ClearMonitor() + return nil + } + return fmt.Errorf("unknown ChannelMonitorHistory unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *ChannelMonitorHistoryMutation) ResetEdge(name string) error { + switch name { + case channelmonitorhistory.EdgeMonitor: + m.ResetMonitor() + return nil + } + return fmt.Errorf("unknown ChannelMonitorHistory edge %s", name) +} + // ErrorPassthroughRuleMutation represents an operation that mutates the ErrorPassthroughRule nodes in the graph. type ErrorPassthroughRuleMutation struct { config diff --git a/backend/ent/predicate/predicate.go b/backend/ent/predicate/predicate.go index 0aa90b90..256b5f2a 100644 --- a/backend/ent/predicate/predicate.go +++ b/backend/ent/predicate/predicate.go @@ -27,6 +27,12 @@ type AuthIdentity func(*sql.Selector) // AuthIdentityChannel is the predicate function for authidentitychannel builders. type AuthIdentityChannel func(*sql.Selector) +// ChannelMonitor is the predicate function for channelmonitor builders. 
+type ChannelMonitor func(*sql.Selector) + +// ChannelMonitorHistory is the predicate function for channelmonitorhistory builders. +type ChannelMonitorHistory func(*sql.Selector) + // ErrorPassthroughRule is the predicate function for errorpassthroughrule builders. type ErrorPassthroughRule func(*sql.Selector) diff --git a/backend/ent/runtime/runtime.go b/backend/ent/runtime/runtime.go index bdb7f7a9..0183f377 100644 --- a/backend/ent/runtime/runtime.go +++ b/backend/ent/runtime/runtime.go @@ -12,6 +12,8 @@ import ( "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/authidentity" "github.com/Wei-Shaw/sub2api/ent/authidentitychannel" + "github.com/Wei-Shaw/sub2api/ent/channelmonitor" + "github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory" "github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule" "github.com/Wei-Shaw/sub2api/ent/group" "github.com/Wei-Shaw/sub2api/ent/idempotencyrecord" @@ -427,6 +429,127 @@ func init() { authidentitychannelDescMetadata := authidentitychannelFields[6].Descriptor() // authidentitychannel.DefaultMetadata holds the default value on creation for the metadata field. authidentitychannel.DefaultMetadata = authidentitychannelDescMetadata.Default.(func() map[string]interface{}) + channelmonitorMixin := schema.ChannelMonitor{}.Mixin() + channelmonitorMixinFields0 := channelmonitorMixin[0].Fields() + _ = channelmonitorMixinFields0 + channelmonitorFields := schema.ChannelMonitor{}.Fields() + _ = channelmonitorFields + // channelmonitorDescCreatedAt is the schema descriptor for created_at field. + channelmonitorDescCreatedAt := channelmonitorMixinFields0[0].Descriptor() + // channelmonitor.DefaultCreatedAt holds the default value on creation for the created_at field. + channelmonitor.DefaultCreatedAt = channelmonitorDescCreatedAt.Default.(func() time.Time) + // channelmonitorDescUpdatedAt is the schema descriptor for updated_at field. 
+ channelmonitorDescUpdatedAt := channelmonitorMixinFields0[1].Descriptor() + // channelmonitor.DefaultUpdatedAt holds the default value on creation for the updated_at field. + channelmonitor.DefaultUpdatedAt = channelmonitorDescUpdatedAt.Default.(func() time.Time) + // channelmonitor.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + channelmonitor.UpdateDefaultUpdatedAt = channelmonitorDescUpdatedAt.UpdateDefault.(func() time.Time) + // channelmonitorDescName is the schema descriptor for name field. + channelmonitorDescName := channelmonitorFields[0].Descriptor() + // channelmonitor.NameValidator is a validator for the "name" field. It is called by the builders before save. + channelmonitor.NameValidator = func() func(string) error { + validators := channelmonitorDescName.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(name string) error { + for _, fn := range fns { + if err := fn(name); err != nil { + return err + } + } + return nil + } + }() + // channelmonitorDescEndpoint is the schema descriptor for endpoint field. + channelmonitorDescEndpoint := channelmonitorFields[2].Descriptor() + // channelmonitor.EndpointValidator is a validator for the "endpoint" field. It is called by the builders before save. + channelmonitor.EndpointValidator = func() func(string) error { + validators := channelmonitorDescEndpoint.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(endpoint string) error { + for _, fn := range fns { + if err := fn(endpoint); err != nil { + return err + } + } + return nil + } + }() + // channelmonitorDescAPIKeyEncrypted is the schema descriptor for api_key_encrypted field. + channelmonitorDescAPIKeyEncrypted := channelmonitorFields[3].Descriptor() + // channelmonitor.APIKeyEncryptedValidator is a validator for the "api_key_encrypted" field. 
It is called by the builders before save. + channelmonitor.APIKeyEncryptedValidator = channelmonitorDescAPIKeyEncrypted.Validators[0].(func(string) error) + // channelmonitorDescPrimaryModel is the schema descriptor for primary_model field. + channelmonitorDescPrimaryModel := channelmonitorFields[4].Descriptor() + // channelmonitor.PrimaryModelValidator is a validator for the "primary_model" field. It is called by the builders before save. + channelmonitor.PrimaryModelValidator = func() func(string) error { + validators := channelmonitorDescPrimaryModel.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(primary_model string) error { + for _, fn := range fns { + if err := fn(primary_model); err != nil { + return err + } + } + return nil + } + }() + // channelmonitorDescExtraModels is the schema descriptor for extra_models field. + channelmonitorDescExtraModels := channelmonitorFields[5].Descriptor() + // channelmonitor.DefaultExtraModels holds the default value on creation for the extra_models field. + channelmonitor.DefaultExtraModels = channelmonitorDescExtraModels.Default.([]string) + // channelmonitorDescGroupName is the schema descriptor for group_name field. + channelmonitorDescGroupName := channelmonitorFields[6].Descriptor() + // channelmonitor.DefaultGroupName holds the default value on creation for the group_name field. + channelmonitor.DefaultGroupName = channelmonitorDescGroupName.Default.(string) + // channelmonitor.GroupNameValidator is a validator for the "group_name" field. It is called by the builders before save. + channelmonitor.GroupNameValidator = channelmonitorDescGroupName.Validators[0].(func(string) error) + // channelmonitorDescEnabled is the schema descriptor for enabled field. + channelmonitorDescEnabled := channelmonitorFields[7].Descriptor() + // channelmonitor.DefaultEnabled holds the default value on creation for the enabled field. 
+ channelmonitor.DefaultEnabled = channelmonitorDescEnabled.Default.(bool) + // channelmonitorDescIntervalSeconds is the schema descriptor for interval_seconds field. + channelmonitorDescIntervalSeconds := channelmonitorFields[8].Descriptor() + // channelmonitor.IntervalSecondsValidator is a validator for the "interval_seconds" field. It is called by the builders before save. + channelmonitor.IntervalSecondsValidator = channelmonitorDescIntervalSeconds.Validators[0].(func(int) error) + channelmonitorhistoryFields := schema.ChannelMonitorHistory{}.Fields() + _ = channelmonitorhistoryFields + // channelmonitorhistoryDescModel is the schema descriptor for model field. + channelmonitorhistoryDescModel := channelmonitorhistoryFields[1].Descriptor() + // channelmonitorhistory.ModelValidator is a validator for the "model" field. It is called by the builders before save. + channelmonitorhistory.ModelValidator = func() func(string) error { + validators := channelmonitorhistoryDescModel.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(model string) error { + for _, fn := range fns { + if err := fn(model); err != nil { + return err + } + } + return nil + } + }() + // channelmonitorhistoryDescMessage is the schema descriptor for message field. + channelmonitorhistoryDescMessage := channelmonitorhistoryFields[5].Descriptor() + // channelmonitorhistory.DefaultMessage holds the default value on creation for the message field. + channelmonitorhistory.DefaultMessage = channelmonitorhistoryDescMessage.Default.(string) + // channelmonitorhistory.MessageValidator is a validator for the "message" field. It is called by the builders before save. + channelmonitorhistory.MessageValidator = channelmonitorhistoryDescMessage.Validators[0].(func(string) error) + // channelmonitorhistoryDescCheckedAt is the schema descriptor for checked_at field. 
+ channelmonitorhistoryDescCheckedAt := channelmonitorhistoryFields[6].Descriptor() + // channelmonitorhistory.DefaultCheckedAt holds the default value on creation for the checked_at field. + channelmonitorhistory.DefaultCheckedAt = channelmonitorhistoryDescCheckedAt.Default.(func() time.Time) errorpassthroughruleMixin := schema.ErrorPassthroughRule{}.Mixin() errorpassthroughruleMixinFields0 := errorpassthroughruleMixin[0].Fields() _ = errorpassthroughruleMixinFields0 diff --git a/backend/ent/schema/channel_monitor.go b/backend/ent/schema/channel_monitor.go new file mode 100644 index 00000000..3fa17319 --- /dev/null +++ b/backend/ent/schema/channel_monitor.go @@ -0,0 +1,81 @@ +package schema + +import ( + "github.com/Wei-Shaw/sub2api/ent/schema/mixins" + + "entgo.io/ent" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// ChannelMonitor holds the schema definition for the ChannelMonitor entity. +// 渠道监控配置:定期对指定 provider/endpoint/api_key 下的模型做心跳测试。 +type ChannelMonitor struct { + ent.Schema +} + +func (ChannelMonitor) Annotations() []schema.Annotation { + return []schema.Annotation{ + entsql.Annotation{Table: "channel_monitors"}, + } +} + +func (ChannelMonitor) Mixin() []ent.Mixin { + return []ent.Mixin{ + mixins.TimeMixin{}, + } +} + +func (ChannelMonitor) Fields() []ent.Field { + return []ent.Field{ + field.String("name"). + NotEmpty(). + MaxLen(100), + field.Enum("provider"). + Values("openai", "anthropic", "gemini"), + field.String("endpoint"). + NotEmpty(). + MaxLen(500). + Comment("Provider base origin, e.g. https://api.openai.com"), + field.String("api_key_encrypted"). + NotEmpty(). + Sensitive(). + Comment("AES-256-GCM encrypted API key"), + field.String("primary_model"). + NotEmpty(). + MaxLen(200), + field.JSON("extra_models", []string{}). + Default([]string{}). 
+ Comment("Additional model names to test alongside primary_model"), + field.String("group_name"). + Optional(). + Default(""). + MaxLen(100), + field.Bool("enabled"). + Default(true), + field.Int("interval_seconds"). + Range(15, 3600), + field.Time("last_checked_at"). + Optional(). + Nillable(), + field.Int64("created_by"), + } +} + +func (ChannelMonitor) Edges() []ent.Edge { + return []ent.Edge{ + edge.To("history", ChannelMonitorHistory.Type). + Annotations(entsql.OnDelete(entsql.Cascade)), + } +} + +func (ChannelMonitor) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("enabled", "last_checked_at"), + index.Fields("provider"), + index.Fields("group_name"), + } +} diff --git a/backend/ent/schema/channel_monitor_history.go b/backend/ent/schema/channel_monitor_history.go new file mode 100644 index 00000000..50352016 --- /dev/null +++ b/backend/ent/schema/channel_monitor_history.go @@ -0,0 +1,64 @@ +package schema + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// ChannelMonitorHistory holds the schema definition for the ChannelMonitorHistory entity. +// 渠道监控历史:每次检测每个模型一行记录,由调度器写入,定期清理 30 天前的旧数据。 +type ChannelMonitorHistory struct { + ent.Schema +} + +func (ChannelMonitorHistory) Annotations() []schema.Annotation { + return []schema.Annotation{ + entsql.Annotation{Table: "channel_monitor_histories"}, + } +} + +func (ChannelMonitorHistory) Fields() []ent.Field { + return []ent.Field{ + field.Int64("monitor_id"), + field.String("model"). + NotEmpty(). + MaxLen(200), + field.Enum("status"). + Values("operational", "degraded", "failed", "error"), + field.Int("latency_ms"). + Optional(). + Nillable(), + field.Int("ping_latency_ms"). + Optional(). + Nillable(), + field.String("message"). + Optional(). + Default(""). + MaxLen(500), + field.Time("checked_at"). 
+ Default(time.Now), + } +} + +func (ChannelMonitorHistory) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("monitor", ChannelMonitor.Type). + Ref("history"). + Field("monitor_id"). + Unique(). + Required(), + } +} + +func (ChannelMonitorHistory) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("monitor_id", "model", "checked_at"), + index.Fields("checked_at"), + } +} diff --git a/backend/ent/tx.go b/backend/ent/tx.go index bde3e35b..f937270f 100644 --- a/backend/ent/tx.go +++ b/backend/ent/tx.go @@ -28,6 +28,10 @@ type Tx struct { AuthIdentity *AuthIdentityClient // AuthIdentityChannel is the client for interacting with the AuthIdentityChannel builders. AuthIdentityChannel *AuthIdentityChannelClient + // ChannelMonitor is the client for interacting with the ChannelMonitor builders. + ChannelMonitor *ChannelMonitorClient + // ChannelMonitorHistory is the client for interacting with the ChannelMonitorHistory builders. + ChannelMonitorHistory *ChannelMonitorHistoryClient // ErrorPassthroughRule is the client for interacting with the ErrorPassthroughRule builders. ErrorPassthroughRule *ErrorPassthroughRuleClient // Group is the client for interacting with the Group builders. 
@@ -212,6 +216,8 @@ func (tx *Tx) init() { tx.AnnouncementRead = NewAnnouncementReadClient(tx.config) tx.AuthIdentity = NewAuthIdentityClient(tx.config) tx.AuthIdentityChannel = NewAuthIdentityChannelClient(tx.config) + tx.ChannelMonitor = NewChannelMonitorClient(tx.config) + tx.ChannelMonitorHistory = NewChannelMonitorHistoryClient(tx.config) tx.ErrorPassthroughRule = NewErrorPassthroughRuleClient(tx.config) tx.Group = NewGroupClient(tx.config) tx.IdempotencyRecord = NewIdempotencyRecordClient(tx.config) diff --git a/backend/go.sum b/backend/go.sum index f1c864f5..0f366ee1 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -162,6 +162,8 @@ github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs= github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= +github.com/google/subcommands v1.2.0 h1:vWQspBTo2nEqTUFita5/KeEWlUL8kQObDFbub/EN9oE= +github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/wire v0.7.0 h1:JxUKI6+CVBgCO2WToKy/nQk0sS+amI9z9EjVmdaocj4= @@ -181,6 +183,8 @@ github.com/icholy/digest v1.1.0 h1:HfGg9Irj7i+IX1o1QAmPfIBNu/Q5A5Tu3n/MED9k9H4= github.com/icholy/digest v1.1.0/go.mod h1:QNrsSGQ5v7v9cReDI0+eyjsXGUoRSUZQHeQ5C4XLa0Y= github.com/imroc/req/v3 v3.57.0 h1:LMTUjNRUybUkTPn8oJDq8Kg3JRBOBTcnDhKu7mzupKI= github.com/imroc/req/v3 v3.57.0/go.mod h1:JL62ey1nvSLq81HORNcosvlf7SxZStONNqOprg0Pz00= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod 
h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= @@ -216,6 +220,8 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= +github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM= github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mdelapenya/tlscert v0.2.0 h1:7H81W6Z/4weDvZBNOfQte5GpIMo0lGYEeWbkGp5LJHI= @@ -249,6 +255,8 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w= github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= @@ -278,6 +286,8 @@ 
github.com/refraction-networking/utls v1.8.2 h1:j4Q1gJj0xngdeH+Ox/qND11aEfhpgoEv github.com/refraction-networking/utls v1.8.2/go.mod h1:jkSOEkLqn+S/jtpEHPOsVv/4V4EVnelwbMQl4vCWXAM= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= @@ -310,6 +320,8 @@ github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= diff --git a/backend/internal/handler/admin/channel_monitor_handler.go b/backend/internal/handler/admin/channel_monitor_handler.go new file mode 100644 index 00000000..ce86c3dc --- /dev/null +++ b/backend/internal/handler/admin/channel_monitor_handler.go @@ -0,0 +1,396 @@ +package admin + +import ( + "strconv" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/handler/dto" + infraerrors 
"github.com/Wei-Shaw/sub2api/internal/pkg/errors" + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware" + "github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/gin-gonic/gin" +) + +const ( + // monitorMaxPageSize 列表分页上限。 + monitorMaxPageSize = 100 + // monitorAPIKeyMaskPrefix 脱敏时保留的明文前缀长度。 + monitorAPIKeyMaskPrefix = 4 + // monitorAPIKeyMaskSuffix 脱敏后追加的占位字符串。 + monitorAPIKeyMaskSuffix = "***" +) + +// ChannelMonitorHandler 渠道监控管理后台 handler。 +type ChannelMonitorHandler struct { + monitorService *service.ChannelMonitorService +} + +// NewChannelMonitorHandler 创建 handler。 +func NewChannelMonitorHandler(monitorService *service.ChannelMonitorService) *ChannelMonitorHandler { + return &ChannelMonitorHandler{monitorService: monitorService} +} + +// --- Request / Response --- + +type channelMonitorCreateRequest struct { + Name string `json:"name" binding:"required,max=100"` + Provider string `json:"provider" binding:"required,oneof=openai anthropic gemini"` + Endpoint string `json:"endpoint" binding:"required,max=500"` + APIKey string `json:"api_key" binding:"required,max=2000"` + PrimaryModel string `json:"primary_model" binding:"required,max=200"` + ExtraModels []string `json:"extra_models"` + GroupName string `json:"group_name" binding:"max=100"` + Enabled *bool `json:"enabled"` + IntervalSeconds int `json:"interval_seconds" binding:"required,min=15,max=3600"` +} + +type channelMonitorUpdateRequest struct { + Name *string `json:"name" binding:"omitempty,max=100"` + Provider *string `json:"provider" binding:"omitempty,oneof=openai anthropic gemini"` + Endpoint *string `json:"endpoint" binding:"omitempty,max=500"` + APIKey *string `json:"api_key" binding:"omitempty,max=2000"` + PrimaryModel *string `json:"primary_model" binding:"omitempty,max=200"` + ExtraModels *[]string `json:"extra_models"` + GroupName *string `json:"group_name" binding:"omitempty,max=100"` + Enabled *bool `json:"enabled"` 
+ IntervalSeconds *int `json:"interval_seconds" binding:"omitempty,min=15,max=3600"` +} + +type channelMonitorResponse struct { + ID int64 `json:"id"` + Name string `json:"name"` + Provider string `json:"provider"` + Endpoint string `json:"endpoint"` + APIKeyMasked string `json:"api_key_masked"` + APIKeyDecryptFailed bool `json:"api_key_decrypt_failed"` + PrimaryModel string `json:"primary_model"` + ExtraModels []string `json:"extra_models"` + GroupName string `json:"group_name"` + Enabled bool `json:"enabled"` + IntervalSeconds int `json:"interval_seconds"` + LastCheckedAt *string `json:"last_checked_at"` + CreatedBy int64 `json:"created_by"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + PrimaryStatus string `json:"primary_status"` + PrimaryLatencyMs *int `json:"primary_latency_ms"` + Availability7d float64 `json:"availability_7d"` + ExtraModelsStatus []dto.ChannelMonitorExtraModelStatus `json:"extra_models_status"` +} + +type channelMonitorCheckResultResponse struct { + Model string `json:"model"` + Status string `json:"status"` + LatencyMs *int `json:"latency_ms"` + PingLatencyMs *int `json:"ping_latency_ms"` + Message string `json:"message"` + CheckedAt string `json:"checked_at"` +} + +type channelMonitorHistoryItemResponse struct { + ID int64 `json:"id"` + Model string `json:"model"` + Status string `json:"status"` + LatencyMs *int `json:"latency_ms"` + PingLatencyMs *int `json:"ping_latency_ms"` + Message string `json:"message"` + CheckedAt string `json:"checked_at"` +} + +// maskAPIKey 对 API Key 明文做脱敏:前 4 字符 + "***",长度 ≤ 4 时只显示 "***"。 +func maskAPIKey(plain string) string { + if len(plain) <= monitorAPIKeyMaskPrefix { + return monitorAPIKeyMaskSuffix + } + return plain[:monitorAPIKeyMaskPrefix] + monitorAPIKeyMaskSuffix +} + +func channelMonitorToResponse(m *service.ChannelMonitor) *channelMonitorResponse { + if m == nil { + return nil + } + extras := m.ExtraModels + if extras == nil { + extras = []string{} + } + resp := 
&channelMonitorResponse{ + ID: m.ID, + Name: m.Name, + Provider: m.Provider, + Endpoint: m.Endpoint, + APIKeyMasked: maskAPIKey(m.APIKey), + APIKeyDecryptFailed: m.APIKeyDecryptFailed, + PrimaryModel: m.PrimaryModel, + ExtraModels: extras, + GroupName: m.GroupName, + Enabled: m.Enabled, + IntervalSeconds: m.IntervalSeconds, + CreatedBy: m.CreatedBy, + CreatedAt: m.CreatedAt.UTC().Format(time.RFC3339), + UpdatedAt: m.UpdatedAt.UTC().Format(time.RFC3339), + // PrimaryStatus / PrimaryLatencyMs / Availability7d 由 List handler 在批量聚合后填充。 + } + if m.LastCheckedAt != nil { + s := m.LastCheckedAt.UTC().Format(time.RFC3339) + resp.LastCheckedAt = &s + } + return resp +} + +func checkResultToResponse(r *service.CheckResult) channelMonitorCheckResultResponse { + return channelMonitorCheckResultResponse{ + Model: r.Model, + Status: r.Status, + LatencyMs: r.LatencyMs, + PingLatencyMs: r.PingLatencyMs, + Message: r.Message, + CheckedAt: r.CheckedAt.UTC().Format(time.RFC3339), + } +} + +func historyEntryToResponse(e *service.ChannelMonitorHistoryEntry) channelMonitorHistoryItemResponse { + return channelMonitorHistoryItemResponse{ + ID: e.ID, + Model: e.Model, + Status: e.Status, + LatencyMs: e.LatencyMs, + PingLatencyMs: e.PingLatencyMs, + Message: e.Message, + CheckedAt: e.CheckedAt.UTC().Format(time.RFC3339), + } +} + +// ParseChannelMonitorID 提取并校验路径参数 :id(admin 与 user handler 共享)。 +// 校验失败时已写入 4xx 响应,调用方只需 return。 +func ParseChannelMonitorID(c *gin.Context) (int64, bool) { + id, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil || id <= 0 { + response.ErrorFrom(c, infraerrors.BadRequest("INVALID_MONITOR_ID", "invalid monitor id")) + return 0, false + } + return id, true +} + +// parseListEnabled 解析 enabled query 参数:true/false 转为 *bool,空或非法则返回 nil。 +func parseListEnabled(raw string) *bool { + switch strings.ToLower(strings.TrimSpace(raw)) { + case "true", "1", "yes": + v := true + return &v + case "false", "0", "no": + v := false + return &v + default: + return 
nil + } +} + +// --- Handlers --- + +// List GET /api/v1/admin/channel-monitors +func (h *ChannelMonitorHandler) List(c *gin.Context) { + page, pageSize := response.ParsePagination(c) + if pageSize > monitorMaxPageSize { + pageSize = monitorMaxPageSize + } + + params := service.ChannelMonitorListParams{ + Page: page, + PageSize: pageSize, + Provider: strings.TrimSpace(c.Query("provider")), + Enabled: parseListEnabled(c.Query("enabled")), + Search: strings.TrimSpace(c.Query("search")), + } + + items, total, err := h.monitorService.List(c.Request.Context(), params) + if err != nil { + response.ErrorFrom(c, err) + return + } + + summaries := h.batchSummaryFor(c, items) + out := make([]*channelMonitorResponse, 0, len(items)) + for _, m := range items { + out = append(out, buildListItemResponse(m, summaries[m.ID])) + } + response.Paginated(c, out, total, page, pageSize) +} + +// batchSummaryFor 批量聚合 latest + 7d 可用率,避免每行 2 次 SQL(消除 N+1)。 +func (h *ChannelMonitorHandler) batchSummaryFor(c *gin.Context, items []*service.ChannelMonitor) map[int64]service.MonitorStatusSummary { + ids := make([]int64, 0, len(items)) + primaryByID := make(map[int64]string, len(items)) + extrasByID := make(map[int64][]string, len(items)) + for _, m := range items { + ids = append(ids, m.ID) + primaryByID[m.ID] = m.PrimaryModel + extrasByID[m.ID] = m.ExtraModels + } + return h.monitorService.BatchMonitorStatusSummary(c.Request.Context(), ids, primaryByID, extrasByID) +} + +// buildListItemResponse 把 monitor + summary 装成 admin list 的响应行。 +func buildListItemResponse(m *service.ChannelMonitor, summary service.MonitorStatusSummary) *channelMonitorResponse { + resp := channelMonitorToResponse(m) + resp.PrimaryStatus = summary.PrimaryStatus + resp.PrimaryLatencyMs = summary.PrimaryLatencyMs + resp.Availability7d = summary.Availability7d + resp.ExtraModelsStatus = make([]dto.ChannelMonitorExtraModelStatus, 0, len(summary.ExtraModels)) + for _, e := range summary.ExtraModels { + resp.ExtraModelsStatus = 
append(resp.ExtraModelsStatus, dto.ChannelMonitorExtraModelStatus{ + Model: e.Model, + Status: e.Status, + LatencyMs: e.LatencyMs, + }) + } + return resp +} + +// Get GET /api/v1/admin/channel-monitors/:id +func (h *ChannelMonitorHandler) Get(c *gin.Context) { + id, ok := ParseChannelMonitorID(c) + if !ok { + return + } + m, err := h.monitorService.Get(c.Request.Context(), id) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, channelMonitorToResponse(m)) +} + +// Create POST /api/v1/admin/channel-monitors +func (h *ChannelMonitorHandler) Create(c *gin.Context) { + var req channelMonitorCreateRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.ErrorFrom(c, infraerrors.BadRequest("VALIDATION_ERROR", err.Error())) + return + } + + subject, _ := middleware2.GetAuthSubjectFromContext(c) + + enabled := true + if req.Enabled != nil { + enabled = *req.Enabled + } + + m, err := h.monitorService.Create(c.Request.Context(), service.ChannelMonitorCreateParams{ + Name: req.Name, + Provider: req.Provider, + Endpoint: req.Endpoint, + APIKey: req.APIKey, + PrimaryModel: req.PrimaryModel, + ExtraModels: req.ExtraModels, + GroupName: req.GroupName, + Enabled: enabled, + IntervalSeconds: req.IntervalSeconds, + CreatedBy: subject.UserID, + }) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Created(c, channelMonitorToResponse(m)) +} + +// Update PUT /api/v1/admin/channel-monitors/:id +func (h *ChannelMonitorHandler) Update(c *gin.Context) { + id, ok := ParseChannelMonitorID(c) + if !ok { + return + } + var req channelMonitorUpdateRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.ErrorFrom(c, infraerrors.BadRequest("VALIDATION_ERROR", err.Error())) + return + } + + m, err := h.monitorService.Update(c.Request.Context(), id, service.ChannelMonitorUpdateParams{ + Name: req.Name, + Provider: req.Provider, + Endpoint: req.Endpoint, + APIKey: req.APIKey, + PrimaryModel: req.PrimaryModel, + ExtraModels: 
req.ExtraModels, + GroupName: req.GroupName, + Enabled: req.Enabled, + IntervalSeconds: req.IntervalSeconds, + }) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, channelMonitorToResponse(m)) +} + +// Delete DELETE /api/v1/admin/channel-monitors/:id +func (h *ChannelMonitorHandler) Delete(c *gin.Context) { + id, ok := ParseChannelMonitorID(c) + if !ok { + return + } + if err := h.monitorService.Delete(c.Request.Context(), id); err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, nil) +} + +// Run POST /api/v1/admin/channel-monitors/:id/run +func (h *ChannelMonitorHandler) Run(c *gin.Context) { + id, ok := ParseChannelMonitorID(c) + if !ok { + return + } + results, err := h.monitorService.RunCheck(c.Request.Context(), id) + if err != nil { + response.ErrorFrom(c, err) + return + } + out := make([]channelMonitorCheckResultResponse, 0, len(results)) + for _, r := range results { + out = append(out, checkResultToResponse(r)) + } + response.Success(c, gin.H{"results": out}) +} + +// History GET /api/v1/admin/channel-monitors/:id/history +func (h *ChannelMonitorHandler) History(c *gin.Context) { + id, ok := ParseChannelMonitorID(c) + if !ok { + return + } + limit := parseHistoryLimit(c.Query("limit")) + model := strings.TrimSpace(c.Query("model")) + + entries, err := h.monitorService.ListHistory(c.Request.Context(), id, model, limit) + if err != nil { + response.ErrorFrom(c, err) + return + } + out := make([]channelMonitorHistoryItemResponse, 0, len(entries)) + for _, e := range entries { + out = append(out, historyEntryToResponse(e)) + } + response.Success(c, gin.H{"items": out}) +} + +// parseHistoryLimit 解析 history 接口的 limit query。 +// 使用 service 包的统一上下限常量,避免在 handler 重复定义同名魔法值。 +func parseHistoryLimit(raw string) int { + if strings.TrimSpace(raw) == "" { + return service.MonitorHistoryDefaultLimit + } + v, err := strconv.Atoi(raw) + if err != nil || v <= 0 { + return service.MonitorHistoryDefaultLimit + } + if 
v > service.MonitorHistoryMaxLimit { + return service.MonitorHistoryMaxLimit + } + return v +} diff --git a/backend/internal/handler/channel_monitor_user_handler.go b/backend/internal/handler/channel_monitor_user_handler.go new file mode 100644 index 00000000..a031b4a2 --- /dev/null +++ b/backend/internal/handler/channel_monitor_user_handler.go @@ -0,0 +1,127 @@ +package handler + +import ( + "github.com/Wei-Shaw/sub2api/internal/handler/admin" + "github.com/Wei-Shaw/sub2api/internal/handler/dto" + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + "github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/gin-gonic/gin" +) + +// ChannelMonitorUserHandler 渠道监控用户只读 handler。 +type ChannelMonitorUserHandler struct { + monitorService *service.ChannelMonitorService +} + +// NewChannelMonitorUserHandler 创建 handler。 +func NewChannelMonitorUserHandler(monitorService *service.ChannelMonitorService) *ChannelMonitorUserHandler { + return &ChannelMonitorUserHandler{monitorService: monitorService} +} + +// --- Response --- + +type channelMonitorUserListItem struct { + ID int64 `json:"id"` + Name string `json:"name"` + Provider string `json:"provider"` + GroupName string `json:"group_name"` + PrimaryModel string `json:"primary_model"` + PrimaryStatus string `json:"primary_status"` + PrimaryLatencyMs *int `json:"primary_latency_ms"` + Availability7d float64 `json:"availability_7d"` + ExtraModels []dto.ChannelMonitorExtraModelStatus `json:"extra_models"` +} + +type channelMonitorUserDetailResponse struct { + ID int64 `json:"id"` + Name string `json:"name"` + Provider string `json:"provider"` + GroupName string `json:"group_name"` + Models []channelMonitorUserModelStat `json:"models"` +} + +type channelMonitorUserModelStat struct { + Model string `json:"model"` + LatestStatus string `json:"latest_status"` + LatestLatencyMs *int `json:"latest_latency_ms"` + Availability7d float64 `json:"availability_7d"` + Availability15d float64 `json:"availability_15d"` + Availability30d 
float64 `json:"availability_30d"` + AvgLatency7dMs *int `json:"avg_latency_7d_ms"` +} + +func userMonitorViewToItem(v *service.UserMonitorView) channelMonitorUserListItem { + extras := make([]dto.ChannelMonitorExtraModelStatus, 0, len(v.ExtraModels)) + for _, e := range v.ExtraModels { + extras = append(extras, dto.ChannelMonitorExtraModelStatus{ + Model: e.Model, + Status: e.Status, + LatencyMs: e.LatencyMs, + }) + } + return channelMonitorUserListItem{ + ID: v.ID, + Name: v.Name, + Provider: v.Provider, + GroupName: v.GroupName, + PrimaryModel: v.PrimaryModel, + PrimaryStatus: v.PrimaryStatus, + PrimaryLatencyMs: v.PrimaryLatencyMs, + Availability7d: v.Availability7d, + ExtraModels: extras, + } +} + +func userMonitorDetailToResponse(d *service.UserMonitorDetail) *channelMonitorUserDetailResponse { + models := make([]channelMonitorUserModelStat, 0, len(d.Models)) + for _, m := range d.Models { + models = append(models, channelMonitorUserModelStat{ + Model: m.Model, + LatestStatus: m.LatestStatus, + LatestLatencyMs: m.LatestLatencyMs, + Availability7d: m.Availability7d, + Availability15d: m.Availability15d, + Availability30d: m.Availability30d, + AvgLatency7dMs: m.AvgLatency7dMs, + }) + } + return &channelMonitorUserDetailResponse{ + ID: d.ID, + Name: d.Name, + Provider: d.Provider, + GroupName: d.GroupName, + Models: models, + } +} + +// --- Handlers --- + +// List GET /api/v1/channel-monitors +func (h *ChannelMonitorUserHandler) List(c *gin.Context) { + views, err := h.monitorService.ListUserView(c.Request.Context()) + if err != nil { + response.ErrorFrom(c, err) + return + } + items := make([]channelMonitorUserListItem, 0, len(views)) + for _, v := range views { + items = append(items, userMonitorViewToItem(v)) + } + response.Success(c, gin.H{"items": items}) +} + +// GetStatus GET /api/v1/channel-monitors/:id/status +func (h *ChannelMonitorUserHandler) GetStatus(c *gin.Context) { + // 复用 admin.ParseChannelMonitorID 保持错误码与日志一致。 + id, ok := 
admin.ParseChannelMonitorID(c) + if !ok { + return + } + detail, err := h.monitorService.GetUserDetail(c.Request.Context(), id) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, userMonitorDetailToResponse(detail)) +} diff --git a/backend/internal/handler/dto/channel_monitor.go b/backend/internal/handler/dto/channel_monitor.go new file mode 100644 index 00000000..3c0c5e11 --- /dev/null +++ b/backend/internal/handler/dto/channel_monitor.go @@ -0,0 +1,10 @@ +package dto + +// ChannelMonitorExtraModelStatus 渠道监控附加模型最近一次状态。 +// 同时被 admin handler(List 响应)与 user handler(List 响应)复用, +// 字段必须保持一致以保证前端拿到统一结构。 +type ChannelMonitorExtraModelStatus struct { + Model string `json:"model"` + Status string `json:"status"` + LatencyMs *int `json:"latency_ms"` +} diff --git a/backend/internal/handler/handler.go b/backend/internal/handler/handler.go index 906a74f1..58480c93 100644 --- a/backend/internal/handler/handler.go +++ b/backend/internal/handler/handler.go @@ -31,6 +31,7 @@ type AdminHandlers struct { APIKey *admin.AdminAPIKeyHandler ScheduledTest *admin.ScheduledTestHandler Channel *admin.ChannelHandler + ChannelMonitor *admin.ChannelMonitorHandler Payment *admin.PaymentHandler } @@ -43,6 +44,7 @@ type Handlers struct { Redeem *RedeemHandler Subscription *SubscriptionHandler Announcement *AnnouncementHandler + ChannelMonitor *ChannelMonitorUserHandler Admin *AdminHandlers Gateway *GatewayHandler OpenAIGateway *OpenAIGatewayHandler diff --git a/backend/internal/handler/wire.go b/backend/internal/handler/wire.go index 4b54d41a..7c1a5d1b 100644 --- a/backend/internal/handler/wire.go +++ b/backend/internal/handler/wire.go @@ -34,6 +34,7 @@ func ProvideAdminHandlers( apiKeyHandler *admin.AdminAPIKeyHandler, scheduledTestHandler *admin.ScheduledTestHandler, channelHandler *admin.ChannelHandler, + channelMonitorHandler *admin.ChannelMonitorHandler, paymentHandler *admin.PaymentHandler, ) *AdminHandlers { return &AdminHandlers{ @@ -62,6 +63,7 @@ func 
ProvideAdminHandlers( APIKey: apiKeyHandler, ScheduledTest: scheduledTestHandler, Channel: channelHandler, + ChannelMonitor: channelMonitorHandler, Payment: paymentHandler, } } @@ -85,6 +87,7 @@ func ProvideHandlers( redeemHandler *RedeemHandler, subscriptionHandler *SubscriptionHandler, announcementHandler *AnnouncementHandler, + channelMonitorUserHandler *ChannelMonitorUserHandler, adminHandlers *AdminHandlers, gatewayHandler *GatewayHandler, openaiGatewayHandler *OpenAIGatewayHandler, @@ -103,6 +106,7 @@ func ProvideHandlers( Redeem: redeemHandler, Subscription: subscriptionHandler, Announcement: announcementHandler, + ChannelMonitor: channelMonitorUserHandler, Admin: adminHandlers, Gateway: gatewayHandler, OpenAIGateway: openaiGatewayHandler, @@ -123,6 +127,7 @@ var ProviderSet = wire.NewSet( NewRedeemHandler, NewSubscriptionHandler, NewAnnouncementHandler, + NewChannelMonitorUserHandler, NewGatewayHandler, NewOpenAIGatewayHandler, NewTotpHandler, @@ -156,6 +161,7 @@ var ProviderSet = wire.NewSet( admin.NewAdminAPIKeyHandler, admin.NewScheduledTestHandler, admin.NewChannelHandler, + admin.NewChannelMonitorHandler, admin.NewPaymentHandler, // AdminHandlers and Handlers constructors diff --git a/backend/internal/repository/channel_monitor_repo.go b/backend/internal/repository/channel_monitor_repo.go new file mode 100644 index 00000000..b943f33c --- /dev/null +++ b/backend/internal/repository/channel_monitor_repo.go @@ -0,0 +1,450 @@ +package repository + +import ( + "context" + "database/sql" + "fmt" + "strings" + "time" + + dbent "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/ent/channelmonitor" + "github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/lib/pq" +) + +// channelMonitorRepository 实现 service.ChannelMonitorRepository。 +// +// 选型说明: +// - CRUD 走 ent,复用项目的事务上下文支持 +// - 聚合查询(latest per model / availability)走原生 SQL,避免 ent 在 GROUP BY 上 +// 的样板代码,并保证索引能被命中 +type 
channelMonitorRepository struct { + client *dbent.Client + db *sql.DB +} + +// NewChannelMonitorRepository 创建仓储实例。 +func NewChannelMonitorRepository(client *dbent.Client, db *sql.DB) service.ChannelMonitorRepository { + return &channelMonitorRepository{client: client, db: db} +} + +// ---------- CRUD ---------- + +func (r *channelMonitorRepository) Create(ctx context.Context, m *service.ChannelMonitor) error { + client := clientFromContext(ctx, r.client) + builder := client.ChannelMonitor.Create(). + SetName(m.Name). + SetProvider(channelmonitor.Provider(m.Provider)). + SetEndpoint(m.Endpoint). + SetAPIKeyEncrypted(m.APIKey). // 调用方传入的已是密文 + SetPrimaryModel(m.PrimaryModel). + SetExtraModels(emptySliceIfNil(m.ExtraModels)). + SetGroupName(m.GroupName). + SetEnabled(m.Enabled). + SetIntervalSeconds(m.IntervalSeconds). + SetCreatedBy(m.CreatedBy) + + created, err := builder.Save(ctx) + if err != nil { + return translatePersistenceError(err, service.ErrChannelMonitorNotFound, nil) + } + m.ID = created.ID + m.CreatedAt = created.CreatedAt + m.UpdatedAt = created.UpdatedAt + return nil +} + +func (r *channelMonitorRepository) GetByID(ctx context.Context, id int64) (*service.ChannelMonitor, error) { + row, err := r.client.ChannelMonitor.Query(). + Where(channelmonitor.IDEQ(id)). + Only(ctx) + if err != nil { + return nil, translatePersistenceError(err, service.ErrChannelMonitorNotFound, nil) + } + return entToServiceMonitor(row), nil +} + +func (r *channelMonitorRepository) Update(ctx context.Context, m *service.ChannelMonitor) error { + client := clientFromContext(ctx, r.client) + updater := client.ChannelMonitor.UpdateOneID(m.ID). + SetName(m.Name). + SetProvider(channelmonitor.Provider(m.Provider)). + SetEndpoint(m.Endpoint). + SetAPIKeyEncrypted(m.APIKey). + SetPrimaryModel(m.PrimaryModel). + SetExtraModels(emptySliceIfNil(m.ExtraModels)). + SetGroupName(m.GroupName). + SetEnabled(m.Enabled). 
+ SetIntervalSeconds(m.IntervalSeconds) + + updated, err := updater.Save(ctx) + if err != nil { + return translatePersistenceError(err, service.ErrChannelMonitorNotFound, nil) + } + m.UpdatedAt = updated.UpdatedAt + return nil +} + +func (r *channelMonitorRepository) Delete(ctx context.Context, id int64) error { + client := clientFromContext(ctx, r.client) + if err := client.ChannelMonitor.DeleteOneID(id).Exec(ctx); err != nil { + return translatePersistenceError(err, service.ErrChannelMonitorNotFound, nil) + } + return nil +} + +func (r *channelMonitorRepository) List(ctx context.Context, params service.ChannelMonitorListParams) ([]*service.ChannelMonitor, int64, error) { + q := r.client.ChannelMonitor.Query() + if params.Provider != "" { + q = q.Where(channelmonitor.ProviderEQ(channelmonitor.Provider(params.Provider))) + } + if params.Enabled != nil { + q = q.Where(channelmonitor.EnabledEQ(*params.Enabled)) + } + if s := strings.TrimSpace(params.Search); s != "" { + q = q.Where(channelmonitor.Or( + channelmonitor.NameContainsFold(s), + channelmonitor.GroupNameContainsFold(s), + channelmonitor.PrimaryModelContainsFold(s), + )) + } + + total, err := q.Count(ctx) + if err != nil { + return nil, 0, fmt.Errorf("count monitors: %w", err) + } + + pageSize := params.PageSize + if pageSize <= 0 { + pageSize = 20 + } + page := params.Page + if page <= 0 { + page = 1 + } + + rows, err := q. + Order(dbent.Desc(channelmonitor.FieldID)). + Offset((page - 1) * pageSize). + Limit(pageSize). + All(ctx) + if err != nil { + return nil, 0, fmt.Errorf("list monitors: %w", err) + } + + out := make([]*service.ChannelMonitor, 0, len(rows)) + for _, row := range rows { + out = append(out, entToServiceMonitor(row)) + } + return out, int64(total), nil +} + +// ---------- 调度器辅助 ---------- + +func (r *channelMonitorRepository) ListEnabled(ctx context.Context) ([]*service.ChannelMonitor, error) { + rows, err := r.client.ChannelMonitor.Query(). + Where(channelmonitor.EnabledEQ(true)). 
+ All(ctx) + if err != nil { + return nil, fmt.Errorf("list enabled monitors: %w", err) + } + out := make([]*service.ChannelMonitor, 0, len(rows)) + for _, row := range rows { + out = append(out, entToServiceMonitor(row)) + } + return out, nil +} + +func (r *channelMonitorRepository) MarkChecked(ctx context.Context, id int64, checkedAt time.Time) error { + client := clientFromContext(ctx, r.client) + if err := client.ChannelMonitor.UpdateOneID(id). + SetLastCheckedAt(checkedAt). + Exec(ctx); err != nil { + return translatePersistenceError(err, service.ErrChannelMonitorNotFound, nil) + } + return nil +} + +func (r *channelMonitorRepository) InsertHistoryBatch(ctx context.Context, rows []*service.ChannelMonitorHistoryRow) error { + if len(rows) == 0 { + return nil + } + client := clientFromContext(ctx, r.client) + bulk := make([]*dbent.ChannelMonitorHistoryCreate, 0, len(rows)) + for _, row := range rows { + c := client.ChannelMonitorHistory.Create(). + SetMonitorID(row.MonitorID). + SetModel(row.Model). + SetStatus(channelmonitorhistory.Status(row.Status)). + SetMessage(row.Message). + SetCheckedAt(row.CheckedAt) + if row.LatencyMs != nil { + c = c.SetLatencyMs(*row.LatencyMs) + } + if row.PingLatencyMs != nil { + c = c.SetPingLatencyMs(*row.PingLatencyMs) + } + bulk = append(bulk, c) + } + if _, err := client.ChannelMonitorHistory.CreateBulk(bulk...).Save(ctx); err != nil { + return fmt.Errorf("insert history bulk: %w", err) + } + return nil +} + +func (r *channelMonitorRepository) DeleteHistoryBefore(ctx context.Context, before time.Time) (int64, error) { + client := clientFromContext(ctx, r.client) + n, err := client.ChannelMonitorHistory.Delete(). + Where(channelmonitorhistory.CheckedAtLT(before)). 
+ Exec(ctx) + if err != nil { + return 0, fmt.Errorf("delete history before: %w", err) + } + return int64(n), nil +} + +// ListHistory 按 checked_at 倒序返回某个监控的最近 N 条历史记录。 +// model 为空时不过滤;非空时只返回该模型的记录。 +func (r *channelMonitorRepository) ListHistory(ctx context.Context, monitorID int64, model string, limit int) ([]*service.ChannelMonitorHistoryEntry, error) { + q := r.client.ChannelMonitorHistory.Query(). + Where(channelmonitorhistory.MonitorIDEQ(monitorID)) + if strings.TrimSpace(model) != "" { + q = q.Where(channelmonitorhistory.ModelEQ(model)) + } + rows, err := q. + Order(dbent.Desc(channelmonitorhistory.FieldCheckedAt)). + Limit(limit). + All(ctx) + if err != nil { + return nil, fmt.Errorf("list history: %w", err) + } + out := make([]*service.ChannelMonitorHistoryEntry, 0, len(rows)) + for _, row := range rows { + entry := &service.ChannelMonitorHistoryEntry{ + ID: row.ID, + Model: row.Model, + Status: string(row.Status), + LatencyMs: row.LatencyMs, + PingLatencyMs: row.PingLatencyMs, + Message: row.Message, + CheckedAt: row.CheckedAt, + } + out = append(out, entry) + } + return out, nil +} + +// ---------- 用户视图聚合(原生 SQL) ---------- + +// ListLatestPerModel 用 DISTINCT ON 取每个 (monitor_id, model) 的最近一条记录。 +// 借助 (monitor_id, model, checked_at DESC) 索引可走 Index Scan。 +func (r *channelMonitorRepository) ListLatestPerModel(ctx context.Context, monitorID int64) ([]*service.ChannelMonitorLatest, error) { + const q = ` + SELECT DISTINCT ON (model) + model, status, latency_ms, checked_at + FROM channel_monitor_histories + WHERE monitor_id = $1 + ORDER BY model, checked_at DESC + ` + rows, err := r.db.QueryContext(ctx, q, monitorID) + if err != nil { + return nil, fmt.Errorf("query latest per model: %w", err) + } + defer func() { _ = rows.Close() }() + + out := make([]*service.ChannelMonitorLatest, 0) + for rows.Next() { + l := &service.ChannelMonitorLatest{} + var latency sql.NullInt64 + if err := rows.Scan(&l.Model, &l.Status, &latency, &l.CheckedAt); err != nil { + 
return nil, fmt.Errorf("scan latest row: %w", err) + } + if latency.Valid { + v := int(latency.Int64) + l.LatencyMs = &v + } + out = append(out, l) + } + return out, rows.Err() +} + +// ComputeAvailability 计算指定窗口内每个模型的可用率与平均延迟。 +// "可用" = status IN (operational, degraded)。 +func (r *channelMonitorRepository) ComputeAvailability(ctx context.Context, monitorID int64, windowDays int) ([]*service.ChannelMonitorAvailability, error) { + if windowDays <= 0 { + windowDays = 7 + } + const q = ` + SELECT + model, + COUNT(*) AS total_checks, + COUNT(*) FILTER (WHERE status IN ('operational','degraded')) AS ok_checks, + AVG(latency_ms) FILTER (WHERE latency_ms IS NOT NULL) AS avg_latency_ms + FROM channel_monitor_histories + WHERE monitor_id = $1 + AND checked_at >= $2 + GROUP BY model + ` + from := time.Now().AddDate(0, 0, -windowDays) + rows, err := r.db.QueryContext(ctx, q, monitorID, from) + if err != nil { + return nil, fmt.Errorf("query availability: %w", err) + } + defer func() { _ = rows.Close() }() + + out := make([]*service.ChannelMonitorAvailability, 0) + for rows.Next() { + row, err := scanAvailabilityRow(rows, windowDays) + if err != nil { + return nil, err + } + out = append(out, row) + } + return out, rows.Err() +} + +// scanAvailabilityRow 把单行 (model, total, ok, avg_latency) 扫描为 ChannelMonitorAvailability。 +// 仅服务于 ComputeAvailability(4 列);批量版本因为多一列 monitor_id 直接 inline 调 finalizeAvailabilityRow。 +func scanAvailabilityRow(rows interface{ Scan(...any) error }, windowDays int) (*service.ChannelMonitorAvailability, error) { + row := &service.ChannelMonitorAvailability{WindowDays: windowDays} + var avgLatency sql.NullFloat64 + if err := rows.Scan(&row.Model, &row.TotalChecks, &row.OperationalChecks, &avgLatency); err != nil { + return nil, fmt.Errorf("scan availability row: %w", err) + } + finalizeAvailabilityRow(row, avgLatency) + return row, nil +} + +// finalizeAvailabilityRow 根据 OperationalChecks/TotalChecks 算出可用率, +// 并把 sql.NullFloat64 的平均延迟解包为 
*int。两处复用避免维护漂移。 +func finalizeAvailabilityRow(row *service.ChannelMonitorAvailability, avgLatency sql.NullFloat64) { + if row.TotalChecks > 0 { + row.AvailabilityPct = float64(row.OperationalChecks) * 100.0 / float64(row.TotalChecks) + } + if avgLatency.Valid { + v := int(avgLatency.Float64) + row.AvgLatencyMs = &v + } +} + +// ListLatestForMonitorIDs 一次性查询多个监控的"每个 (monitor_id, model) 最近一条"记录。 +// 利用 PG 的 DISTINCT ON 特性,借助 (monitor_id, model, checked_at DESC) 索引可走 Index Scan。 +func (r *channelMonitorRepository) ListLatestForMonitorIDs(ctx context.Context, ids []int64) (map[int64][]*service.ChannelMonitorLatest, error) { + out := make(map[int64][]*service.ChannelMonitorLatest, len(ids)) + if len(ids) == 0 { + return out, nil + } + const q = ` + SELECT DISTINCT ON (monitor_id, model) + monitor_id, model, status, latency_ms, checked_at + FROM channel_monitor_histories + WHERE monitor_id = ANY($1) + ORDER BY monitor_id, model, checked_at DESC + ` + rows, err := r.db.QueryContext(ctx, q, pq.Array(ids)) + if err != nil { + return nil, fmt.Errorf("query latest batch: %w", err) + } + defer func() { _ = rows.Close() }() + + for rows.Next() { + var monitorID int64 + l := &service.ChannelMonitorLatest{} + var latency sql.NullInt64 + if err := rows.Scan(&monitorID, &l.Model, &l.Status, &latency, &l.CheckedAt); err != nil { + return nil, fmt.Errorf("scan latest batch row: %w", err) + } + if latency.Valid { + v := int(latency.Int64) + l.LatencyMs = &v + } + out[monitorID] = append(out[monitorID], l) + } + if err := rows.Err(); err != nil { + return nil, err + } + return out, nil +} + +// ComputeAvailabilityForMonitors 一次性计算多个监控在某个窗口内的每模型可用率与平均延迟。 +func (r *channelMonitorRepository) ComputeAvailabilityForMonitors(ctx context.Context, ids []int64, windowDays int) (map[int64][]*service.ChannelMonitorAvailability, error) { + out := make(map[int64][]*service.ChannelMonitorAvailability, len(ids)) + if len(ids) == 0 { + return out, nil + } + if windowDays <= 0 { + windowDays = 7 + } + 
const q = ` + SELECT + monitor_id, + model, + COUNT(*) AS total_checks, + COUNT(*) FILTER (WHERE status IN ('operational','degraded')) AS ok_checks, + AVG(latency_ms) FILTER (WHERE latency_ms IS NOT NULL) AS avg_latency_ms + FROM channel_monitor_histories + WHERE monitor_id = ANY($1) + AND checked_at >= $2 + GROUP BY monitor_id, model + ` + from := time.Now().AddDate(0, 0, -windowDays) + rows, err := r.db.QueryContext(ctx, q, pq.Array(ids), from) + if err != nil { + return nil, fmt.Errorf("query availability batch: %w", err) + } + defer func() { _ = rows.Close() }() + + for rows.Next() { + var monitorID int64 + row := &service.ChannelMonitorAvailability{WindowDays: windowDays} + var avgLatency sql.NullFloat64 + if err := rows.Scan(&monitorID, &row.Model, &row.TotalChecks, &row.OperationalChecks, &avgLatency); err != nil { + return nil, fmt.Errorf("scan availability batch row: %w", err) + } + // 批量查询多了首列 monitor_id;其余字段的可用率/平均延迟换算与单 monitor 版本一致, + // 抽出 finalizeAvailabilityRow 复用,避免两处分别维护除法与 NullFloat 解包。 + finalizeAvailabilityRow(row, avgLatency) + out[monitorID] = append(out[monitorID], row) + } + if err := rows.Err(); err != nil { + return nil, err + } + return out, nil +} + +// ---------- helpers ---------- + +func entToServiceMonitor(row *dbent.ChannelMonitor) *service.ChannelMonitor { + if row == nil { + return nil + } + extras := row.ExtraModels + if extras == nil { + extras = []string{} + } + return &service.ChannelMonitor{ + ID: row.ID, + Name: row.Name, + Provider: string(row.Provider), + Endpoint: row.Endpoint, + APIKey: row.APIKeyEncrypted, // 仍为密文,service 层负责解密 + PrimaryModel: row.PrimaryModel, + ExtraModels: extras, + GroupName: row.GroupName, + Enabled: row.Enabled, + IntervalSeconds: row.IntervalSeconds, + LastCheckedAt: row.LastCheckedAt, + CreatedBy: row.CreatedBy, + CreatedAt: row.CreatedAt, + UpdatedAt: row.UpdatedAt, + } +} + +func emptySliceIfNil(in []string) []string { + if in == nil { + return []string{} + } + return in +} diff --git 
a/backend/internal/repository/wire.go b/backend/internal/repository/wire.go index d3adb4a0..7427cd04 100644 --- a/backend/internal/repository/wire.go +++ b/backend/internal/repository/wire.go @@ -89,6 +89,7 @@ var ProviderSet = wire.NewSet( NewErrorPassthroughRepository, NewTLSFingerprintProfileRepository, NewChannelRepository, + NewChannelMonitorRepository, // Cache implementations NewGatewayCache, diff --git a/backend/internal/server/routes/admin.go b/backend/internal/server/routes/admin.go index 84c963ec..0381dc57 100644 --- a/backend/internal/server/routes/admin.go +++ b/backend/internal/server/routes/admin.go @@ -88,6 +88,9 @@ func RegisterAdminRoutes( // 渠道管理 registerChannelRoutes(admin, h) + + // 渠道监控 + registerChannelMonitorRoutes(admin, h) } } @@ -564,3 +567,16 @@ func registerChannelRoutes(admin *gin.RouterGroup, h *handler.Handlers) { channels.DELETE("/:id", h.Admin.Channel.Delete) } } + +func registerChannelMonitorRoutes(admin *gin.RouterGroup, h *handler.Handlers) { + monitors := admin.Group("/channel-monitors") + { + monitors.GET("", h.Admin.ChannelMonitor.List) + monitors.POST("", h.Admin.ChannelMonitor.Create) + monitors.GET("/:id", h.Admin.ChannelMonitor.Get) + monitors.PUT("/:id", h.Admin.ChannelMonitor.Update) + monitors.DELETE("/:id", h.Admin.ChannelMonitor.Delete) + monitors.POST("/:id/run", h.Admin.ChannelMonitor.Run) + monitors.GET("/:id/history", h.Admin.ChannelMonitor.History) + } +} diff --git a/backend/internal/server/routes/user.go b/backend/internal/server/routes/user.go index b76bb3cd..60503a5b 100644 --- a/backend/internal/server/routes/user.go +++ b/backend/internal/server/routes/user.go @@ -103,5 +103,12 @@ func RegisterUserRoutes( subscriptions.GET("/progress", h.Subscription.GetProgress) subscriptions.GET("/summary", h.Subscription.GetSummary) } + + // 渠道监控(用户只读) + monitors := authenticated.Group("/channel-monitors") + { + monitors.GET("", h.ChannelMonitor.List) + monitors.GET("/:id/status", h.ChannelMonitor.GetStatus) + } } } 
diff --git a/backend/internal/service/channel_monitor_aggregator.go b/backend/internal/service/channel_monitor_aggregator.go
new file mode 100644
index 00000000..97015b40
--- /dev/null
+++ b/backend/internal/service/channel_monitor_aggregator.go
@@ -0,0 +1,217 @@
+package service
+
+import (
+	"context"
+	"fmt"
+	"log/slog"
+)
+
+// 渠道监控聚合层:把 latest + availability 拼成 admin/user 视图所需的 summary / detail。
+// 批量聚合入口(BatchMonitorStatusSummary)遵守"失败仅日志,返回零值"的原则,避免 N+1 查询失败拖垮列表渲染;
+// ListUserView / GetUserDetail 等视图方法照常返回 error。
+
+// BatchMonitorStatusSummary 批量聚合多个监控的 latest + 7d 可用率(admin/user list 用,消除 N+1)。
+// 查询失败时错误仅日志,对应字段以零值填充,不影响列表渲染。
+//
+// 参数:
+//   - ids: 要聚合的 monitor ID 列表
+//   - primaryByID: monitor ID -> primary model(用于读 7d 可用率与 latest 状态)
+//   - extrasByID: monitor ID -> extra models 列表(用于读 latest 状态填充 ExtraModels)
+func (s *ChannelMonitorService) BatchMonitorStatusSummary(
+	ctx context.Context,
+	ids []int64,
+	primaryByID map[int64]string,
+	extrasByID map[int64][]string,
+) map[int64]MonitorStatusSummary {
+	out := make(map[int64]MonitorStatusSummary, len(ids))
+	if len(ids) == 0 {
+		return out
+	}
+	latestMap, err := s.repo.ListLatestForMonitorIDs(ctx, ids)
+	if err != nil {
+		slog.Warn("channel_monitor: batch load latest failed", "error", err)
+		latestMap = map[int64][]*ChannelMonitorLatest{}
+	}
+	availMap, err := s.repo.ComputeAvailabilityForMonitors(ctx, ids, monitorAvailability7Days)
+	if err != nil {
+		slog.Warn("channel_monitor: batch compute availability failed", "error", err)
+		availMap = map[int64][]*ChannelMonitorAvailability{}
+	}
+
+	for _, id := range ids {
+		out[id] = buildStatusSummary(
+			indexLatestByModel(latestMap[id]),
+			indexAvailabilityByModel(availMap[id]),
+			primaryByID[id],
+			extrasByID[id],
+		)
+	}
+	return out
+}
+
+// ListUserView 用户只读视图:列出所有 enabled 监控的概览。
+// 使用批量聚合接口避免 N+1:1 次查 monitors,1 次查 latest(所有 monitor),1 次查 availability。
+func (s *ChannelMonitorService) ListUserView(ctx context.Context) ([]*UserMonitorView, error) {
+	monitors, err := s.repo.ListEnabled(ctx)
+	if err != nil {
+		return nil, 
fmt.Errorf("list enabled monitors: %w", err) + } + if len(monitors) == 0 { + return []*UserMonitorView{}, nil + } + + ids := make([]int64, 0, len(monitors)) + primaryByID := make(map[int64]string, len(monitors)) + extrasByID := make(map[int64][]string, len(monitors)) + for _, m := range monitors { + ids = append(ids, m.ID) + primaryByID[m.ID] = m.PrimaryModel + extrasByID[m.ID] = m.ExtraModels + } + summaries := s.BatchMonitorStatusSummary(ctx, ids, primaryByID, extrasByID) + + views := make([]*UserMonitorView, 0, len(monitors)) + for _, m := range monitors { + summary := summaries[m.ID] + views = append(views, buildUserViewFromSummary(m, summary)) + } + return views, nil +} + +// GetUserDetail 用户只读视图:单个监控详情(每个模型 7d/15d/30d 可用率与平均延迟)。 +// 不暴露 api_key。 +func (s *ChannelMonitorService) GetUserDetail(ctx context.Context, id int64) (*UserMonitorDetail, error) { + m, err := s.repo.GetByID(ctx, id) + if err != nil { + return nil, err + } + if !m.Enabled { + return nil, ErrChannelMonitorNotFound + } + + latest, err := s.repo.ListLatestPerModel(ctx, id) + if err != nil { + return nil, fmt.Errorf("list latest per model: %w", err) + } + availMap, err := s.collectAvailabilityWindows(ctx, id) + if err != nil { + return nil, err + } + + models := mergeModelDetails(m, latest, availMap) + return &UserMonitorDetail{ + ID: m.ID, + Name: m.Name, + Provider: m.Provider, + GroupName: m.GroupName, + Models: models, + }, nil +} + +// collectAvailabilityWindows 一次性查询 7/15/30 天三个窗口,按模型组织。 +func (s *ChannelMonitorService) collectAvailabilityWindows(ctx context.Context, monitorID int64) (map[int]map[string]*ChannelMonitorAvailability, error) { + out := make(map[int]map[string]*ChannelMonitorAvailability, 3) + windows := []int{monitorAvailability7Days, monitorAvailability15Days, monitorAvailability30Days} + for _, w := range windows { + rows, err := s.repo.ComputeAvailability(ctx, monitorID, w) + if err != nil { + return nil, fmt.Errorf("compute availability %dd: %w", w, err) + } + out[w] = 
indexAvailabilityByModel(rows) + } + return out, nil +} + +// ---------- 纯函数 helper(无 IO,可在 batch / 单 monitor / detail 路径复用)---------- + +// indexLatestByModel 把 latest 切片按 model 索引(小工具,避免在 hot path 重复写)。 +func indexLatestByModel(rows []*ChannelMonitorLatest) map[string]*ChannelMonitorLatest { + m := make(map[string]*ChannelMonitorLatest, len(rows)) + for _, r := range rows { + m[r.Model] = r + } + return m +} + +// indexAvailabilityByModel 把 availability 切片按 model 索引。 +func indexAvailabilityByModel(rows []*ChannelMonitorAvailability) map[string]*ChannelMonitorAvailability { + m := make(map[string]*ChannelMonitorAvailability, len(rows)) + for _, r := range rows { + m[r.Model] = r + } + return m +} + +// buildStatusSummary 由 latest + availability 字典构造 MonitorStatusSummary。 +// 不做任何 IO,纯组装,便于在 batch 与单 monitor 路径复用。 +func buildStatusSummary( + latestByModel map[string]*ChannelMonitorLatest, + availByModel map[string]*ChannelMonitorAvailability, + primary string, + extras []string, +) MonitorStatusSummary { + summary := MonitorStatusSummary{ExtraModels: make([]ExtraModelStatus, 0, len(extras))} + if primary != "" { + if l, ok := latestByModel[primary]; ok { + summary.PrimaryStatus = l.Status + summary.PrimaryLatencyMs = l.LatencyMs + } + if a, ok := availByModel[primary]; ok { + summary.Availability7d = a.AvailabilityPct + } + } + for _, model := range extras { + entry := ExtraModelStatus{Model: model} + if l, ok := latestByModel[model]; ok { + entry.Status = l.Status + entry.LatencyMs = l.LatencyMs + } + summary.ExtraModels = append(summary.ExtraModels, entry) + } + return summary +} + +// buildUserViewFromSummary 用预聚合好的 MonitorStatusSummary 装填 UserMonitorView(无 IO)。 +func buildUserViewFromSummary(m *ChannelMonitor, summary MonitorStatusSummary) *UserMonitorView { + return &UserMonitorView{ + ID: m.ID, + Name: m.Name, + Provider: m.Provider, + GroupName: m.GroupName, + PrimaryModel: m.PrimaryModel, + PrimaryStatus: summary.PrimaryStatus, + PrimaryLatencyMs: 
summary.PrimaryLatencyMs, + Availability7d: summary.Availability7d, + ExtraModels: summary.ExtraModels, + } +} + +// mergeModelDetails 合并 latest + availability 三个窗口为 ModelDetail 列表。 +// 复用 indexLatestByModel,避免在多处重复写 build map 逻辑。 +func mergeModelDetails( + m *ChannelMonitor, + latest []*ChannelMonitorLatest, + availMap map[int]map[string]*ChannelMonitorAvailability, +) []ModelDetail { + all := append([]string{m.PrimaryModel}, m.ExtraModels...) + latestByModel := indexLatestByModel(latest) + out := make([]ModelDetail, 0, len(all)) + for _, model := range all { + d := ModelDetail{Model: model} + if l, ok := latestByModel[model]; ok { + d.LatestStatus = l.Status + d.LatestLatencyMs = l.LatencyMs + } + if a, ok := availMap[monitorAvailability7Days][model]; ok { + d.Availability7d = a.AvailabilityPct + d.AvgLatency7dMs = a.AvgLatencyMs + } + if a, ok := availMap[monitorAvailability15Days][model]; ok { + d.Availability15d = a.AvailabilityPct + } + if a, ok := availMap[monitorAvailability30Days][model]; ok { + d.Availability30d = a.AvailabilityPct + } + out = append(out, d) + } + return out +} diff --git a/backend/internal/service/channel_monitor_challenge.go b/backend/internal/service/channel_monitor_challenge.go new file mode 100644 index 00000000..e81a9e2a --- /dev/null +++ b/backend/internal/service/channel_monitor_challenge.go @@ -0,0 +1,80 @@ +package service + +import ( + "fmt" + "math/rand/v2" + "regexp" + "strconv" +) + +// monitorChallengePromptTemplate 1:1 复刻 BingZi-233/check-cx 的 few-shot 模板。 +const monitorChallengePromptTemplate = `Calculate and respond with ONLY the number, nothing else. + +Q: 3 + 5 = ? +A: 8 + +Q: 12 - 7 = ? +A: 5 + +Q: %d %s %d = ? 
+A:` + +// monitorChallengeNumberRegex 提取响应中的所有整数(含负号)。 +var monitorChallengeNumberRegex = regexp.MustCompile(`-?\d+`) + +// monitorChallenge 一次 challenge 的 prompt + 期望答案。 +type monitorChallenge struct { + Prompt string + Expected string +} + +// generateChallenge 生成一次随机算术 challenge: +// - 随机两个 [monitorChallengeMin, monitorChallengeMax] 整数 +// - 50% 加 / 50% 减;减法用 max - min 保证非负 +// - 渲染 few-shot 模板 +// +// 不强求加密随机:math/rand/v2 足够分散,避免 crypto/rand 的开销。 +func generateChallenge() monitorChallenge { + a := randIntInRange(monitorChallengeMin, monitorChallengeMax) + b := randIntInRange(monitorChallengeMin, monitorChallengeMax) + + if rand.IntN(2) == 0 { //nolint:gosec // 仅用于生成测试问题,无安全影响 + // 加法 + return monitorChallenge{ + Prompt: fmt.Sprintf(monitorChallengePromptTemplate, a, "+", b), + Expected: strconv.Itoa(a + b), + } + } + + // 减法,保证非负 + hi, lo := a, b + if lo > hi { + hi, lo = lo, hi + } + return monitorChallenge{ + Prompt: fmt.Sprintf(monitorChallengePromptTemplate, hi, "-", lo), + Expected: strconv.Itoa(hi - lo), + } +} + +// randIntInRange 返回 [min, max] 闭区间的随机整数。 +func randIntInRange(minVal, maxVal int) int { + if maxVal <= minVal { + return minVal + } + return minVal + rand.IntN(maxVal-minVal+1) //nolint:gosec +} + +// validateChallenge 在响应文本中查找 expected 整数答案,返回是否通过校验。 +func validateChallenge(responseText, expected string) bool { + if responseText == "" || expected == "" { + return false + } + matches := monitorChallengeNumberRegex.FindAllString(responseText, -1) + for _, m := range matches { + if m == expected { + return true + } + } + return false +} diff --git a/backend/internal/service/channel_monitor_checker.go b/backend/internal/service/channel_monitor_checker.go new file mode 100644 index 00000000..ba5ce0e8 --- /dev/null +++ b/backend/internal/service/channel_monitor_checker.go @@ -0,0 +1,299 @@ +package service + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "regexp" + "strings" + "time" + + 
"github.com/tidwall/gjson" +) + +// monitorHTTPClient 共享一个 http.Client,避免每次检测重建 transport。 +// 自定义 Transport 在 dial 时强制再次校验 IP,防止 DNS rebinding 绕过 validateEndpoint。 +var monitorHTTPClient = newSSRFSafeHTTPClient(monitorRequestTimeout) + +// monitorPingHTTPClient 用于 endpoint origin 的 HEAD ping,超时更短。 +var monitorPingHTTPClient = newSSRFSafeHTTPClient(monitorPingTimeout) + +// newSSRFSafeHTTPClient 返回一个使用 safeDialContext 的 http.Client。 +// 仅供监控模块对外发起请求使用——所有目标都应是公网 endpoint。 +func newSSRFSafeHTTPClient(timeout time.Duration) *http.Client { + tr := &http.Transport{ + DialContext: safeDialContext, + ForceAttemptHTTP2: true, + MaxIdleConns: 16, + IdleConnTimeout: monitorIdleConnTimeout, + TLSHandshakeTimeout: monitorTLSHandshakeTimeout, + ResponseHeaderTimeout: monitorResponseHeaderTimeout, + } + return &http.Client{Timeout: timeout, Transport: tr} +} + +// runCheckForModel 对单个 (provider, model) 做一次完整检测。 +// 不返回 error:所有失败都包装进 CheckResult.Status=error/failed。 +func runCheckForModel(ctx context.Context, provider, endpoint, apiKey, model string) *CheckResult { + res := &CheckResult{ + Model: model, + Status: MonitorStatusError, + CheckedAt: time.Now(), + } + + challenge := generateChallenge() + + start := time.Now() + respText, statusCode, err := callProvider(ctx, provider, endpoint, apiKey, model, challenge.Prompt) + latency := time.Since(start) + latencyMs := int(latency / time.Millisecond) + res.LatencyMs = &latencyMs + + if err != nil { + res.Status = MonitorStatusError + res.Message = truncateMessage(sanitizeErrorMessage(err.Error())) + return res + } + if statusCode < 200 || statusCode >= 300 { + res.Status = MonitorStatusError + res.Message = truncateMessage(sanitizeErrorMessage(fmt.Sprintf("upstream HTTP %d: %s", statusCode, respText))) + return res + } + + if !validateChallenge(respText, challenge.Expected) { + res.Status = MonitorStatusFailed + res.Message = truncateMessage(sanitizeErrorMessage(fmt.Sprintf("challenge mismatch (expected %s, got %q)", 
challenge.Expected, respText))) + return res + } + + if latency >= monitorDegradedThreshold { + res.Status = MonitorStatusDegraded + res.Message = truncateMessage(fmt.Sprintf("slow response: %dms", latencyMs)) + return res + } + + res.Status = MonitorStatusOperational + return res +} + +// pingEndpointOrigin 对 endpoint 的 origin (scheme://host) 发起 HEAD 请求,返回耗时。 +// 失败时返回 nil(不影响主状态判定)。 +func pingEndpointOrigin(ctx context.Context, endpoint string) *int { + origin, err := extractOrigin(endpoint) + if err != nil || origin == "" { + return nil + } + req, err := http.NewRequestWithContext(ctx, http.MethodHead, origin, nil) + if err != nil { + return nil + } + start := time.Now() + resp, err := monitorPingHTTPClient.Do(req) + if err != nil { + return nil + } + defer func() { _ = resp.Body.Close() }() + _, _ = io.Copy(io.Discard, io.LimitReader(resp.Body, monitorPingDiscardMaxBytes)) + ms := int(time.Since(start) / time.Millisecond) + return &ms +} + +// providerAdapter 描述某个 provider 在 challenge 检测中需要的 4 件事: +// - 拼出请求路径(含 model 占位) +// - 序列化请求体 +// - 构造鉴权头 +// - 从响应 JSON 中按 path 提取文本(gjson path) +// +// 加新 provider 只需要在 providerAdapters 里增加一个条目,无需触碰 callProvider / validateProvider。 +type providerAdapter struct { + buildPath func(model string) string + buildBody func(model, prompt string) ([]byte, error) + buildHeaders func(apiKey string) map[string]string + textPath string // gjson 提取响应文本的 path +} + +// providerAdapters 全部已支持的 provider。键值即 MonitorProvider* 字符串。 +// +//nolint:gochecknoglobals // 适配器表是只读静态数据,初始化后不变更。 +var providerAdapters = map[string]providerAdapter{ + MonitorProviderOpenAI: { + buildPath: func(string) string { return providerOpenAIPath }, + buildBody: func(model, prompt string) ([]byte, error) { + return json.Marshal(map[string]any{ + "model": model, + "messages": []map[string]string{{"role": "user", "content": prompt}}, + "max_tokens": monitorChallengeMaxTokens, + "stream": false, + }) + }, + buildHeaders: func(apiKey string) map[string]string { + 
return map[string]string{"Authorization": "Bearer " + apiKey} + }, + textPath: "choices.0.message.content", + }, + MonitorProviderAnthropic: { + buildPath: func(string) string { return providerAnthropicPath }, + buildBody: func(model, prompt string) ([]byte, error) { + return json.Marshal(map[string]any{ + "model": model, + "messages": []map[string]string{{"role": "user", "content": prompt}}, + "max_tokens": monitorChallengeMaxTokens, + }) + }, + buildHeaders: func(apiKey string) map[string]string { + return map[string]string{ + "x-api-key": apiKey, + "anthropic-version": monitorAnthropicAPIVersion, + } + }, + textPath: "content.0.text", + }, + MonitorProviderGemini: { + // Gemini 把 model 名写在 URL path 上:/v1beta/models/{model}:generateContent + buildPath: func(model string) string { return fmt.Sprintf(providerGeminiPathTemplate, model) }, + buildBody: func(_, prompt string) ([]byte, error) { + return json.Marshal(map[string]any{ + "contents": []map[string]any{ + {"parts": []map[string]any{{"text": prompt}}}, + }, + "generationConfig": map[string]any{"maxOutputTokens": monitorChallengeMaxTokens}, + }) + }, + // 使用 x-goog-api-key header 而不是 ?key= query,避免 *url.Error 把 key 回填到错误日志。 + buildHeaders: func(apiKey string) map[string]string { + return map[string]string{"x-goog-api-key": apiKey} + }, + textPath: "candidates.0.content.parts.0.text", + }, +} + +// isSupportedProvider 校验 provider 字符串是否在 adapter 表中。 +// 供 validate.go 的 validateProvider 复用,避免两份 switch 漂移。 +func isSupportedProvider(p string) bool { + _, ok := providerAdapters[p] + return ok +} + +// callProvider 通过 providerAdapters 分发到具体实现。 +// 返回值:响应中提取的文本、HTTP status、网络/序列化错误。 +func callProvider(ctx context.Context, provider, endpoint, apiKey, model, prompt string) (string, int, error) { + adapter, ok := providerAdapters[provider] + if !ok { + return "", 0, fmt.Errorf("unsupported provider %q", provider) + } + body, err := adapter.buildBody(model, prompt) + if err != nil { + return "", 0, fmt.Errorf("marshal 
body: %w", err) + } + full := joinURL(endpoint, adapter.buildPath(model)) + respBody, status, err := postRawJSON(ctx, full, body, adapter.buildHeaders(apiKey)) + if err != nil { + return "", status, err + } + return gjson.GetBytes(respBody, adapter.textPath).String(), status, nil +} + +// postRawJSON 发送 POST + 已序列化好的 JSON 字节,限制响应体大小,返回响应字节、HTTP status、错误。 +// adapter 自行 marshal 是为了精确控制字段顺序与类型,所以这里直接收 []byte 而不是 any。 +func postRawJSON(ctx context.Context, fullURL string, payload []byte, headers map[string]string) ([]byte, int, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodPost, fullURL, bytes.NewReader(payload)) + if err != nil { + return nil, 0, fmt.Errorf("build request: %w", err) + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Accept", "application/json") + for k, v := range headers { + req.Header.Set(k, v) + } + + resp, err := monitorHTTPClient.Do(req) + if err != nil { + return nil, 0, fmt.Errorf("do request: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + respBody, err := io.ReadAll(io.LimitReader(resp.Body, monitorResponseMaxBytes)) + if err != nil { + return nil, resp.StatusCode, fmt.Errorf("read body: %w", err) + } + return respBody, resp.StatusCode, nil +} + +// joinURL 把 base origin 与 path 拼成完整 URL。 +// 容忍 base 末尾有/无斜杠,path 必带前导斜杠。 +func joinURL(base, path string) string { + base = strings.TrimRight(base, "/") + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + return base + path +} + +// extractOrigin 从一个 endpoint URL 中提取 scheme://host[:port] 部分。 +func extractOrigin(endpoint string) (string, error) { + u, err := url.Parse(endpoint) + if err != nil { + return "", err + } + if u.Scheme == "" || u.Host == "" { + return "", errors.New("endpoint missing scheme or host") + } + return u.Scheme + "://" + u.Host, nil +} + +// monitorSensitiveQueryParamRegex 匹配 URL query 中可能泄露凭证的参数: +// key / api_key / api-key / access_token / token / authorization / x-api-key。 +// 大小写不敏感,匹配 `?name=value` 或 
`&name=value` 形式(value 截到 & 或字符串末尾)。 +var monitorSensitiveQueryParamRegex = regexp.MustCompile(`(?i)([?&](?:key|api[_-]?key|access[_-]?token|token|authorization|x-api-key)=)[^&\s"']+`) + +// monitorAPIKeyPatterns 匹配常见 provider 的 API key 字面量。 +// 顺序敏感:sk-ant- 必须放在 sk- 之前,否则会被通用 sk- 模式先消费。 +var monitorAPIKeyPatterns = []struct { + pattern *regexp.Regexp + replace string +}{ + // Anthropic(带前缀,必须先匹配):sk-ant-xxxxxxx + {regexp.MustCompile(`sk-ant-[A-Za-z0-9_-]{20,}`), "sk-ant-***REDACTED***"}, + // OpenAI / Anthropic 通用 sk-: sk-xxxxxxx + {regexp.MustCompile(`sk-[A-Za-z0-9-]{20,}`), "sk-***REDACTED***"}, + // Gemini / Google API Key:固定前缀 + 35 位 + {regexp.MustCompile(`AIza[A-Za-z0-9_-]{35}`), "AIza***REDACTED***"}, + // JWT 三段式(Bearer 后常出现):eyJxxx.eyJxxx.signature + {regexp.MustCompile(`eyJ[A-Za-z0-9_-]{8,}\.eyJ[A-Za-z0-9_-]{8,}\.[A-Za-z0-9_-]{8,}`), "eyJ***REDACTED.JWT***"}, +} + +// sanitizeErrorMessage 擦除错误/响应文本中可能泄露的 API key。 +// 处理两类来源: +// 1. URL query 中的 ?key= / ?api_key= 等(Go *url.Error 会回填完整 URL) +// 2. 
上游 HTTP body 文本里直接出现的 sk-* / AIza* / JWT 等密钥碎片 +// +// 注意:与 gemini_messages_compat_service.go 的 sanitizeUpstreamErrorMessage 关注点类似但参数集更广, +// 监控模块独立维护,避免互相耦合。 +func sanitizeErrorMessage(msg string) string { + if msg == "" { + return msg + } + msg = monitorSensitiveQueryParamRegex.ReplaceAllString(msg, `${1}REDACTED`) + for _, p := range monitorAPIKeyPatterns { + msg = p.pattern.ReplaceAllString(msg, p.replace) + } + return msg +} + +// truncateMessage 把消息按 monitorMessageMaxBytes 截断,避免 DB 列溢出与日志过长。 +func truncateMessage(msg string) string { + if len(msg) <= monitorMessageMaxBytes { + return msg + } + const ellipsis = "...(truncated)" + cutoff := monitorMessageMaxBytes - len(ellipsis) + if cutoff < 0 { + cutoff = 0 + } + return msg[:cutoff] + ellipsis +} diff --git a/backend/internal/service/channel_monitor_const.go b/backend/internal/service/channel_monitor_const.go new file mode 100644 index 00000000..b4c02bcb --- /dev/null +++ b/backend/internal/service/channel_monitor_const.go @@ -0,0 +1,137 @@ +package service + +import ( + "time" + + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" +) + +// ChannelMonitor 全局常量。 +// 这些是 MVP 阶段的硬编码值,按需可以提到 config 中。 +const ( + // monitorRequestTimeout 单次模型请求总超时(含 Body 读取)。 + monitorRequestTimeout = 45 * time.Second + // monitorPingTimeout HEAD 请求 endpoint origin 的超时。 + monitorPingTimeout = 8 * time.Second + // monitorDegradedThreshold 主请求成功但耗时超过该阈值视为 degraded。 + monitorDegradedThreshold = 6 * time.Second + // monitorHistoryRetentionDays 历史保留天数(每天清理一次)。 + monitorHistoryRetentionDays = 30 + // monitorWorkerConcurrency 调度器并发执行的监控数(pond 池容量)。 + monitorWorkerConcurrency = 5 + // monitorTickerInterval 调度器扫描"到期监控"的间隔。 + monitorTickerInterval = 5 * time.Second + // monitorMinIntervalSeconds / monitorMaxIntervalSeconds 用户配置的检测间隔上下限。 + monitorMinIntervalSeconds = 15 + monitorMaxIntervalSeconds = 3600 + // monitorMessageMaxBytes message 字段最大字节数(与 schema/migration 一致)。 + monitorMessageMaxBytes = 500 + // monitorResponseMaxBytes 
单次模型响应最大读取字节,防止 OOM。 + monitorResponseMaxBytes = 64 * 1024 + // monitorChallengeMin / monitorChallengeMax challenge 操作数范围。 + monitorChallengeMin = 1 + monitorChallengeMax = 50 + + // providerOpenAIPath OpenAI Chat Completions 路径。 + providerOpenAIPath = "/v1/chat/completions" + // providerAnthropicPath Anthropic Messages 路径。 + providerAnthropicPath = "/v1/messages" + // providerGeminiPathTemplate Gemini generateContent 路径模板(含 model 占位)。 + providerGeminiPathTemplate = "/v1beta/models/%s:generateContent" + + // MonitorProviderOpenAI / Anthropic / Gemini provider 字符串常量(也是 ent enum 的实际值)。 + MonitorProviderOpenAI = "openai" + MonitorProviderAnthropic = "anthropic" + MonitorProviderGemini = "gemini" + + // MonitorStatusOperational 等监控状态字符串常量(与 ent enum 一致)。 + MonitorStatusOperational = "operational" + MonitorStatusDegraded = "degraded" + MonitorStatusFailed = "failed" + MonitorStatusError = "error" + + // monitorAvailability7Days / 15 / 30 用于聚合查询窗口。 + monitorAvailability7Days = 7 + monitorAvailability15Days = 15 + monitorAvailability30Days = 30 + + // monitorCleanupCheckInterval 历史清理调度器的检查频率(每小时检查"是否到 03:00")。 + monitorCleanupCheckInterval = time.Hour + // monitorCleanupHour 凌晨 3 点执行历史清理。 + monitorCleanupHour = 3 + + // MonitorHistoryDefaultLimit 历史查询默认返回条数(handler 层共享)。 + MonitorHistoryDefaultLimit = 100 + // MonitorHistoryMaxLimit 历史查询最大返回条数(handler 层共享)。 + MonitorHistoryMaxLimit = 1000 + + // monitorEndpointResolveTimeout validateEndpoint 解析 hostname 的最长耗时。 + monitorEndpointResolveTimeout = 5 * time.Second + + // ---- checker / runner 行为参数(消除 magic 值)---- + + // monitorAnthropicAPIVersion Anthropic Messages API 版本头。 + monitorAnthropicAPIVersion = "2023-06-01" + // monitorChallengeMaxTokens 单次 challenge 请求的 max_tokens(足够回答个位数算术)。 + monitorChallengeMaxTokens = 50 + + // monitorListDueTimeout tickDueChecks 查询到期监控的总超时。 + monitorListDueTimeout = 10 * time.Second + // monitorRunOneBuffer runOne 的总超时缓冲(除请求超时与 ping 超时外的额外裕量)。 + monitorRunOneBuffer = 10 * time.Second + // 
monitorCleanupTimeout 历史清理任务的总超时。 + monitorCleanupTimeout = 30 * time.Second + // monitorCleanupDayLayout 历史清理用于"今日是否已跑过"判定的日期格式。 + monitorCleanupDayLayout = "2006-01-02" + + // monitorIdleConnTimeout HTTP transport 空闲连接关闭超时。 + monitorIdleConnTimeout = 30 * time.Second + // monitorTLSHandshakeTimeout HTTP transport TLS 握手超时。 + monitorTLSHandshakeTimeout = 10 * time.Second + // monitorResponseHeaderTimeout HTTP transport 等待响应头超时。 + monitorResponseHeaderTimeout = 30 * time.Second + // monitorPingDiscardMaxBytes ping 时丢弃响应体的最大字节数。 + monitorPingDiscardMaxBytes = 1024 + + // monitorDialTimeout 自定义 dialer 单次连接超时。 + monitorDialTimeout = 10 * time.Second + // monitorDialKeepAlive 自定义 dialer keep-alive 间隔。 + monitorDialKeepAlive = 30 * time.Second +) + +// 业务错误(统一在此声明,避免散落)。 +var ( + ErrChannelMonitorNotFound = infraerrors.NotFound( + "CHANNEL_MONITOR_NOT_FOUND", "channel monitor not found", + ) + ErrChannelMonitorInvalidProvider = infraerrors.BadRequest( + "CHANNEL_MONITOR_INVALID_PROVIDER", "provider must be one of openai/anthropic/gemini", + ) + ErrChannelMonitorInvalidInterval = infraerrors.BadRequest( + "CHANNEL_MONITOR_INVALID_INTERVAL", "interval_seconds must be in [15, 3600]", + ) + ErrChannelMonitorInvalidEndpoint = infraerrors.BadRequest( + "CHANNEL_MONITOR_INVALID_ENDPOINT", "endpoint must be a valid https URL", + ) + ErrChannelMonitorEndpointScheme = infraerrors.BadRequest( + "CHANNEL_MONITOR_ENDPOINT_SCHEME", "endpoint must use https scheme", + ) + ErrChannelMonitorEndpointPath = infraerrors.BadRequest( + "CHANNEL_MONITOR_ENDPOINT_PATH", "endpoint must be base origin only (no path/query/fragment)", + ) + ErrChannelMonitorEndpointPrivate = infraerrors.BadRequest( + "CHANNEL_MONITOR_ENDPOINT_PRIVATE", "endpoint must be a public host", + ) + ErrChannelMonitorEndpointUnreachable = infraerrors.BadRequest( + "CHANNEL_MONITOR_ENDPOINT_UNREACHABLE", "endpoint hostname could not be resolved", + ) + ErrChannelMonitorMissingAPIKey = infraerrors.BadRequest( + 
"CHANNEL_MONITOR_MISSING_API_KEY", "api_key is required when creating a monitor",
+	)
+	ErrChannelMonitorMissingPrimaryModel = infraerrors.BadRequest(
+		"CHANNEL_MONITOR_MISSING_PRIMARY_MODEL", "primary_model is required",
+	)
+	ErrChannelMonitorAPIKeyDecryptFailed = infraerrors.InternalServer(
+		"CHANNEL_MONITOR_KEY_DECRYPT_FAILED", "api key decryption failed; please re-edit the monitor with a fresh key",
+	)
+)
diff --git a/backend/internal/service/channel_monitor_runner.go b/backend/internal/service/channel_monitor_runner.go
new file mode 100644
index 00000000..377903d3
--- /dev/null
+++ b/backend/internal/service/channel_monitor_runner.go
@@ -0,0 +1,208 @@
+package service
+
+import (
+	"context"
+	"log/slog"
+	"sync"
+	"time"
+
+	"github.com/alitto/pond/v2"
+)
+
+// ChannelMonitorRunner is the channel-monitor scheduler.
+//
+// Responsibilities:
+//   - every monitorTickerInterval, scan once for monitors that are "due for a check"
+//   - execute checks asynchronously through a pond pool (capacity monitorWorkerConcurrency)
+//   - check the wall clock once per hour and run history cleanup at monitorCleanupHour
+//   - on Stop, shut down gracefully: close stopCh -> wg.Wait on both loops -> drain the pool
+//
+// No cron library is introduced; cleanup scheduling is implemented by
+// "check the time every hour", which is sufficient for the MVP.
+type ChannelMonitorRunner struct {
+	svc *ChannelMonitorService
+
+	pool   pond.Pool
+	stopCh chan struct{}
+	once   sync.Once
+	wg     sync.WaitGroup
+
+	// inFlight tracks monitor.IDs currently executing. tickDueChecks consults it
+	// before scheduling to avoid duplicate submission, so the same monitor is never
+	// run concurrently when a single check takes longer than its interval.
+	inFlight   map[int64]struct{}
+	inFlightMu sync.Mutex
+
+	// Cleanup state: lastCleanupDay records the "year-month-day" of the last
+	// cleanup so it never runs twice on the same day.
+	lastCleanupDay string
+	cleanupMu      sync.Mutex
+}
+
+// NewChannelMonitorRunner constructs the scheduler. Start is invoked from wire.
+func NewChannelMonitorRunner(svc *ChannelMonitorService) *ChannelMonitorRunner {
+	return &ChannelMonitorRunner{
+		svc:      svc,
+		stopCh:   make(chan struct{}),
+		inFlight: make(map[int64]struct{}),
+	}
+}
+
+// Start launches the ticker, the worker pool and the cleanup loop.
+// Callers must invoke it at most once (wire's ProvideChannelMonitorRunner calls it once).
+func (r *ChannelMonitorRunner) Start() {
+	if r == nil || r.svc == nil {
+		return
+	}
+	// pond pool of capacity monitorWorkerConcurrency. Submission goes through
+	// TrySubmit in tickDueChecks, so when the pool is saturated the check is
+	// skipped for this tick rather than queued — the backlog cannot grow unbounded.
+	r.pool = pond.NewPool(monitorWorkerConcurrency)
+
+	r.wg.Add(2)
+	go r.dueCheckLoop()
+	go r.cleanupLoop()
+}
+
+// Stop shuts down gracefully: close stopCh -> wait for both loops to exit -> drain the pool.
+func (r *ChannelMonitorRunner) Stop() {
+	if r == nil {
+		return
+	}
+	r.once.Do(func() {
+		close(r.stopCh)
+	})
+	r.wg.Wait()
+	if r.pool != nil {
+		r.pool.StopAndWait()
+	}
+}
+
+// dueCheckLoop scans once per monitorTickerInterval for due monitors and submits them to the pool.
+func (r *ChannelMonitorRunner) dueCheckLoop() {
+	defer r.wg.Done()
+
+	ticker := time.NewTicker(monitorTickerInterval)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-r.stopCh:
+			return
+		case <-ticker.C:
+			r.tickDueChecks()
+		}
+	}
+}
+
+// tickDueChecks performs one scan: query due monitors and submit each to the pool.
+// Monitors already executing are skipped (prevents re-scheduling when a single
+// check takes longer than the interval). When the pool is full, TrySubmit skips
+// the submission (the ticker must not block) and the already-claimed in-flight
+// slot is released immediately.
+func (r *ChannelMonitorRunner) tickDueChecks() {
+	ctx, cancel := context.WithTimeout(context.Background(), monitorListDueTimeout)
+	defer cancel()
+
+	due, err := r.svc.listDueForCheck(ctx)
+	if err != nil {
+		slog.Warn("channel_monitor: list due failed", "error", err)
+		return
+	}
+	for _, m := range due {
+		monitor := m
+		if !r.tryAcquireInFlight(monitor.ID) {
+			slog.Debug("channel_monitor: skip already in-flight",
+				"monitor_id", monitor.ID, "name", monitor.Name)
+			continue
+		}
+		if _, ok := r.pool.TrySubmit(func() {
+			r.runOne(monitor.ID, monitor.Name)
+		}); !ok {
+			// Pool full: drop this check, but the claimed in-flight slot must be
+			// released here, otherwise this monitor would be stuck forever.
+			r.releaseInFlight(monitor.ID)
+			slog.Warn("channel_monitor: worker pool full, skip submission",
+				"monitor_id", monitor.ID, "name", monitor.Name)
+		}
+	}
+}
+
+// tryAcquireInFlight atomically claims the monitor's in-flight slot.
+// Returns false when already claimed (the caller should skip this submission).
+func (r *ChannelMonitorRunner) tryAcquireInFlight(id int64) bool {
+	r.inFlightMu.Lock()
+	defer r.inFlightMu.Unlock()
+	if _, exists := r.inFlight[id]; exists {
+		return false
+	}
+	r.inFlight[id] = struct{}{}
+	return true
+}
+
+// releaseInFlight frees the in-flight slot. Must be called after runOne
+// completes (including after a panic recover).
+func (r *ChannelMonitorRunner) releaseInFlight(id int64) {
+	r.inFlightMu.Lock()
+	delete(r.inFlight, id)
+	r.inFlightMu.Unlock()
+}
+
+// runOne executes the check for a single monitor. All errors are logged only;
+// there is no circuit breaking. The in-flight slot is always released when the
+// task ends (including after a panic recover).
+//
+// Single-decrypt path: delegates to svc.RunCheck, which centralizes the
+// Get + APIKeyDecryptFailed verdict + running the checks, so the runner does
+// not Get (and hence decrypt the ciphertext) a second time itself.
+func (r *ChannelMonitorRunner) runOne(id int64, name string) {
+	// Per-task ceiling = request timeout + ping timeout + some buffer.
+	ctx, cancel := context.WithTimeout(context.Background(), monitorRequestTimeout+monitorPingTimeout+monitorRunOneBuffer)
+	defer cancel()
+
+	defer r.releaseInFlight(id)
+
+	defer func() {
+		if rec := recover(); rec != nil {
+			slog.Error("channel_monitor: runner panic",
+				"monitor_id", id, "name", name, "panic", rec)
+		}
+	}()
+
+	if _, err := r.svc.RunCheck(ctx, id); err != nil {
+		// ErrChannelMonitorAPIKeyDecryptFailed is an expected, recoverable error; Warn is enough.
+		slog.Warn("channel_monitor: run check failed",
+			"monitor_id", id, "name", name, "error", err)
+	}
+}
+
+// cleanupLoop checks the current time every hour; at monitorCleanupHour (and only
+// if it has not run yet today) it runs one cleanup. It also checks immediately on
+// start so a freshly started process does not wait hours for the first cleanup.
+func (r *ChannelMonitorRunner) cleanupLoop() {
+	defer r.wg.Done()
+
+	ticker := time.NewTicker(monitorCleanupCheckInterval)
+	defer ticker.Stop()
+
+	r.maybeRunCleanup()
+	for {
+		select {
+		case <-r.stopCh:
+			return
+		case <-ticker.C:
+			r.maybeRunCleanup()
+		}
+	}
+}
+
+// maybeRunCleanup runs the cleanup when the current hour equals monitorCleanupHour
+// and it has not already run today.
+func (r *ChannelMonitorRunner) maybeRunCleanup() {
+	now := time.Now()
+	if now.Hour() != monitorCleanupHour {
+		return
+	}
+	day := now.Format(monitorCleanupDayLayout)
+
+	r.cleanupMu.Lock()
+	if r.lastCleanupDay == day {
+		r.cleanupMu.Unlock()
+		return
+	}
+	r.lastCleanupDay = day
+	r.cleanupMu.Unlock()
+
+	ctx, cancel := context.WithTimeout(context.Background(), monitorCleanupTimeout)
+	defer cancel()
+	if err := r.svc.cleanupOldHistory(ctx); err != nil {
+		slog.Warn("channel_monitor: cleanup history failed", "error", err)
+	}
+}
diff --git a/backend/internal/service/channel_monitor_service.go
b/backend/internal/service/channel_monitor_service.go new file mode 100644 index 00000000..b179e50c --- /dev/null +++ b/backend/internal/service/channel_monitor_service.go @@ -0,0 +1,374 @@ +package service + +import ( + "context" + "fmt" + "log/slog" + "strings" + "sync" + "time" + + "golang.org/x/sync/errgroup" +) + +// ChannelMonitorRepository 渠道监控数据访问接口。 +// 入参/返回的指针类型均使用 service 包的 ChannelMonitor 模型, +// repository 实现负责与 ent 模型互转,并保持 api_key_encrypted 字段为密文。 +type ChannelMonitorRepository interface { + // CRUD + Create(ctx context.Context, m *ChannelMonitor) error + GetByID(ctx context.Context, id int64) (*ChannelMonitor, error) + Update(ctx context.Context, m *ChannelMonitor) error + Delete(ctx context.Context, id int64) error + List(ctx context.Context, params ChannelMonitorListParams) ([]*ChannelMonitor, int64, error) + + // 调度器辅助 + ListEnabled(ctx context.Context) ([]*ChannelMonitor, error) + MarkChecked(ctx context.Context, id int64, checkedAt time.Time) error + InsertHistoryBatch(ctx context.Context, rows []*ChannelMonitorHistoryRow) error + DeleteHistoryBefore(ctx context.Context, before time.Time) (int64, error) + + // 历史记录 + ListHistory(ctx context.Context, monitorID int64, model string, limit int) ([]*ChannelMonitorHistoryEntry, error) + + // 用户视图聚合 + ListLatestPerModel(ctx context.Context, monitorID int64) ([]*ChannelMonitorLatest, error) + ComputeAvailability(ctx context.Context, monitorID int64, windowDays int) ([]*ChannelMonitorAvailability, error) + + // 批量聚合(admin/user list 用,避免 N+1) + ListLatestForMonitorIDs(ctx context.Context, ids []int64) (map[int64][]*ChannelMonitorLatest, error) + ComputeAvailabilityForMonitors(ctx context.Context, ids []int64, windowDays int) (map[int64][]*ChannelMonitorAvailability, error) +} + +// ChannelMonitorService 渠道监控管理服务。 +type ChannelMonitorService struct { + repo ChannelMonitorRepository + encryptor SecretEncryptor +} + +// NewChannelMonitorService 创建渠道监控服务实例。 +func NewChannelMonitorService(repo 
ChannelMonitorRepository, encryptor SecretEncryptor) *ChannelMonitorService { + return &ChannelMonitorService{repo: repo, encryptor: encryptor} +} + +// ---------- CRUD ---------- + +// List 列表查询(支持 provider/enabled/search 过滤 + 分页)。 +// 返回的 ChannelMonitor.APIKey 已解密为明文,handler 层负责脱敏。 +func (s *ChannelMonitorService) List(ctx context.Context, params ChannelMonitorListParams) ([]*ChannelMonitor, int64, error) { + if params.Page < 1 { + params.Page = 1 + } + if params.PageSize < 1 || params.PageSize > 200 { + params.PageSize = 20 + } + items, total, err := s.repo.List(ctx, params) + if err != nil { + return nil, 0, fmt.Errorf("list channel monitors: %w", err) + } + for _, it := range items { + s.decryptInPlace(it) + } + return items, total, nil +} + +// Get 查询单个监控(解密 API Key)。 +func (s *ChannelMonitorService) Get(ctx context.Context, id int64) (*ChannelMonitor, error) { + m, err := s.repo.GetByID(ctx, id) + if err != nil { + return nil, err + } + s.decryptInPlace(m) + return m, nil +} + +// Create 创建监控(内部加密 api_key)。 +func (s *ChannelMonitorService) Create(ctx context.Context, p ChannelMonitorCreateParams) (*ChannelMonitor, error) { + if err := validateCreateParams(p); err != nil { + return nil, err + } + encrypted, err := s.encryptor.Encrypt(p.APIKey) + if err != nil { + return nil, fmt.Errorf("encrypt api key: %w", err) + } + m := &ChannelMonitor{ + Name: strings.TrimSpace(p.Name), + Provider: p.Provider, + Endpoint: normalizeEndpoint(p.Endpoint), + APIKey: encrypted, // 注意:传入 repository 时该字段为密文 + PrimaryModel: strings.TrimSpace(p.PrimaryModel), + ExtraModels: normalizeModels(p.ExtraModels), + GroupName: strings.TrimSpace(p.GroupName), + Enabled: p.Enabled, + IntervalSeconds: p.IntervalSeconds, + CreatedBy: p.CreatedBy, + } + if err := s.repo.Create(ctx, m); err != nil { + return nil, fmt.Errorf("create channel monitor: %w", err) + } + // 不再调 s.Get 重走解密链:已知刚加密的明文,直接构造响应。 + // 这样可避免 SecretEncryptor 解密失败时 APIKey 被静默清空的问题(见 Fix 4)。 + m.APIKey = 
strings.TrimSpace(p.APIKey) + return m, nil +} + +// validateCreateParams 把 Create 入参的所有校验聚拢为一个函数,避免 Create 主体超过 30 行。 +func validateCreateParams(p ChannelMonitorCreateParams) error { + if err := validateProvider(p.Provider); err != nil { + return err + } + if err := validateInterval(p.IntervalSeconds); err != nil { + return err + } + if err := validateEndpoint(p.Endpoint); err != nil { + return err + } + if strings.TrimSpace(p.APIKey) == "" { + return ErrChannelMonitorMissingAPIKey + } + if strings.TrimSpace(p.PrimaryModel) == "" { + return ErrChannelMonitorMissingPrimaryModel + } + return nil +} + +// Update 更新监控。APIKey 字段:nil 或空字符串 = 不修改;非空 = 加密后覆盖。 +func (s *ChannelMonitorService) Update(ctx context.Context, id int64, p ChannelMonitorUpdateParams) (*ChannelMonitor, error) { + existing, err := s.repo.GetByID(ctx, id) + if err != nil { + return nil, err + } + if err := applyMonitorUpdate(existing, p); err != nil { + return nil, err + } + + newPlainAPIKey, apiKeyUpdated, err := s.applyAPIKeyUpdate(existing, p.APIKey) + if err != nil { + return nil, err + } + + if err := s.repo.Update(ctx, existing); err != nil { + return nil, fmt.Errorf("update channel monitor: %w", err) + } + + // 不再调 s.Get 重走解密链:避免二次解密带来的"密文被静默清空"风险(与 Create 一致)。 + if apiKeyUpdated { + existing.APIKey = newPlainAPIKey + } else { + s.decryptInPlace(existing) + } + return existing, nil +} + +// applyAPIKeyUpdate 处理 Update 中的 APIKey 字段: +// - 入参 raw 为 nil 或空白:不修改 existing.APIKey(仍为密文),返回 updated=false +// - 非空:加密后写入 existing.APIKey;同时把明文返回给调用方, +// 供写库成功后塞回 existing 避免把密文吐回客户端 +func (s *ChannelMonitorService) applyAPIKeyUpdate(existing *ChannelMonitor, raw *string) (plain string, updated bool, err error) { + if raw == nil || strings.TrimSpace(*raw) == "" { + return "", false, nil + } + plain = strings.TrimSpace(*raw) + encrypted, encErr := s.encryptor.Encrypt(plain) + if encErr != nil { + return "", false, fmt.Errorf("encrypt api key: %w", encErr) + } + existing.APIKey = encrypted + return plain, 
true, nil +} + +// Delete 删除监控(历史通过外键 CASCADE 自动清理)。 +func (s *ChannelMonitorService) Delete(ctx context.Context, id int64) error { + if err := s.repo.Delete(ctx, id); err != nil { + return fmt.Errorf("delete channel monitor: %w", err) + } + return nil +} + +// ListHistory 列出某个监控最近的检测历史。 +// model 为空表示返回所有模型;limit <= 0 时使用默认值,超过上限会被截断。 +func (s *ChannelMonitorService) ListHistory(ctx context.Context, id int64, model string, limit int) ([]*ChannelMonitorHistoryEntry, error) { + if _, err := s.repo.GetByID(ctx, id); err != nil { + return nil, err + } + if limit <= 0 { + limit = MonitorHistoryDefaultLimit + } + if limit > MonitorHistoryMaxLimit { + limit = MonitorHistoryMaxLimit + } + entries, err := s.repo.ListHistory(ctx, id, strings.TrimSpace(model), limit) + if err != nil { + return nil, fmt.Errorf("list history: %w", err) + } + return entries, nil +} + +// ---------- 业务 ---------- + +// RunCheck 同步触发对一个监控的检测:并发跑 primary + extra 模型, +// 写历史记录并更新 last_checked_at。返回每个模型的检测结果。 +func (s *ChannelMonitorService) RunCheck(ctx context.Context, id int64) ([]*CheckResult, error) { + m, err := s.Get(ctx, id) // 已解密 APIKey + if err != nil { + return nil, err + } + if m.APIKeyDecryptFailed { + return nil, ErrChannelMonitorAPIKeyDecryptFailed + } + results := s.runChecksConcurrent(ctx, m) + s.persistCheckResults(ctx, m, results) + return results, nil +} + +// persistCheckResults 写入本次检测的历史记录并更新 last_checked_at。 +// 任一写库失败都只记日志,不影响调用方拿到 results(与 MVP 期望一致:宁可漏记历史也要先返回结果)。 +func (s *ChannelMonitorService) persistCheckResults(ctx context.Context, m *ChannelMonitor, results []*CheckResult) { + rows := make([]*ChannelMonitorHistoryRow, 0, len(results)) + for _, r := range results { + rows = append(rows, &ChannelMonitorHistoryRow{ + MonitorID: m.ID, + Model: r.Model, + Status: r.Status, + LatencyMs: r.LatencyMs, + PingLatencyMs: r.PingLatencyMs, + Message: r.Message, + CheckedAt: r.CheckedAt, + }) + } + if err := s.repo.InsertHistoryBatch(ctx, rows); err != nil { + 
slog.Error("channel_monitor: insert history failed",
+			"monitor_id", m.ID, "name", m.Name, "error", err)
+	}
+	if err := s.repo.MarkChecked(ctx, m.ID, time.Now()); err != nil {
+		slog.Error("channel_monitor: mark checked failed",
+			"monitor_id", m.ID, "error", err)
+	}
+}
+
+// runChecksConcurrent checks the primary + extra models concurrently.
+// errgroup is used only for waiting and never propagates errors (every
+// per-model failure is already packed into its CheckResult).
+func (s *ChannelMonitorService) runChecksConcurrent(ctx context.Context, m *ChannelMonitor) []*CheckResult {
+	models := append([]string{m.PrimaryModel}, m.ExtraModels...)
+	results := make([]*CheckResult, len(models))
+
+	// Ping once and share it: every model records the same ping latency.
+	pingMs := pingEndpointOrigin(ctx, m.Endpoint)
+
+	var eg errgroup.Group
+	var mu sync.Mutex
+	for i, model := range models {
+		i, model := i, model
+		eg.Go(func() error {
+			r := runCheckForModel(ctx, m.Provider, m.Endpoint, m.APIKey, model)
+			r.PingLatencyMs = pingMs
+			mu.Lock()
+			results[i] = r
+			mu.Unlock()
+			return nil
+		})
+	}
+	_ = eg.Wait()
+	return results
+}
+
+// ---------- scheduler internals ----------
+
+// listDueForCheck returns the monitors needing an immediate check:
+// enabled=true AND (last_checked_at IS NULL OR last_checked_at + interval <= now).
+// NOTE(review): the repository only filters on enabled (ListEnabled); the
+// due-time comparison happens here in the application layer, not in repository
+// SQL. Fine for MVP data volumes; push it into a repository query if the
+// monitor count grows large.
+func (s *ChannelMonitorService) listDueForCheck(ctx context.Context) ([]*ChannelMonitor, error) {
+	all, err := s.repo.ListEnabled(ctx)
+	if err != nil {
+		return nil, err
+	}
+	now := time.Now()
+	due := make([]*ChannelMonitor, 0, len(all))
+	for _, m := range all {
+		if m.LastCheckedAt == nil {
+			due = append(due, m)
+			continue
+		}
+		nextAt := m.LastCheckedAt.Add(time.Duration(m.IntervalSeconds) * time.Second)
+		if !nextAt.After(now) {
+			due = append(due, m)
+		}
+	}
+	return due, nil
+}
+
+// cleanupOldHistory deletes history rows older than monitorHistoryRetentionDays days.
+func (s *ChannelMonitorService) cleanupOldHistory(ctx context.Context) error {
+	before := time.Now().AddDate(0, 0, -monitorHistoryRetentionDays)
+	deleted, err := s.repo.DeleteHistoryBefore(ctx, before)
+	if err != nil {
return fmt.Errorf("delete history before %s: %w", before.Format(time.RFC3339), err) + } + if deleted > 0 { + slog.Info("channel_monitor: history cleanup", + "deleted_rows", deleted, "before", before.Format(time.RFC3339)) + } + return nil +} + +// ---------- helpers ---------- + +// decryptInPlace 把 ChannelMonitor.APIKey 从密文解密为明文。 +// 解密失败时把字段清空 + 设置 APIKeyDecryptFailed=true(不返回错误,避免阻断列表渲染)。 +// runner / RunCheck 必须读取该标志位并拒绝执行检测。 +func (s *ChannelMonitorService) decryptInPlace(m *ChannelMonitor) { + if m == nil || m.APIKey == "" { + return + } + plain, err := s.encryptor.Decrypt(m.APIKey) + if err != nil { + slog.Warn("channel_monitor: decrypt api key failed", + "monitor_id", m.ID, "error", err) + m.APIKey = "" + m.APIKeyDecryptFailed = true + return + } + m.APIKey = plain +} + +// applyMonitorUpdate 把 update params 中非 nil 的字段应用到 existing 上。 +// APIKey 字段在调用方单独处理(涉及加密)。 +// +// 行数稍超过 30:这是逐字段平铺的 dispatcher,每个 if 都是 1-3 行的"非 nil 则覆盖"模式, +// 拆分反而会增加跳转噪音、影响可读性,故保留为单函数。 +func applyMonitorUpdate(existing *ChannelMonitor, p ChannelMonitorUpdateParams) error { + if p.Name != nil { + existing.Name = strings.TrimSpace(*p.Name) + } + if p.Provider != nil { + if err := validateProvider(*p.Provider); err != nil { + return err + } + existing.Provider = *p.Provider + } + if p.Endpoint != nil { + if err := validateEndpoint(*p.Endpoint); err != nil { + return err + } + existing.Endpoint = normalizeEndpoint(*p.Endpoint) + } + if p.PrimaryModel != nil { + existing.PrimaryModel = strings.TrimSpace(*p.PrimaryModel) + } + if p.ExtraModels != nil { + existing.ExtraModels = normalizeModels(*p.ExtraModels) + } + if p.GroupName != nil { + existing.GroupName = strings.TrimSpace(*p.GroupName) + } + if p.Enabled != nil { + existing.Enabled = *p.Enabled + } + if p.IntervalSeconds != nil { + if err := validateInterval(*p.IntervalSeconds); err != nil { + return err + } + existing.IntervalSeconds = *p.IntervalSeconds + } + return nil +} diff --git a/backend/internal/service/channel_monitor_ssrf.go 
b/backend/internal/service/channel_monitor_ssrf.go new file mode 100644 index 00000000..8d93f600 --- /dev/null +++ b/backend/internal/service/channel_monitor_ssrf.go @@ -0,0 +1,152 @@ +package service + +import ( + "context" + "net" + "strings" +) + +// SSRF 防护 helper: +// - validateEndpoint 在 admin 提交时阻止 http/loopback/私网/云元数据 URL +// - safeDialContext 在 socket 层再次校验真实 IP,防止 DNS rebinding +// +// 已知 cloud metadata hostname 拒绝列表(小写比较)。 +var monitorBlockedHostnames = map[string]struct{}{ + "localhost": {}, + "localhost.localdomain": {}, + "metadata": {}, + "metadata.google.internal": {}, + "metadata.goog": {}, + "instance-data": {}, + "instance-data.ec2.internal": {}, +} + +// CIDR 列表:包含所有需要拒绝的 IPv4/IPv6 段。 +// 解析时只 panic 一次(启动时确认),生产路径只做 Contains。 +var monitorBlockedCIDRs = mustParseCIDRs([]string{ + "127.0.0.0/8", // IPv4 loopback + "10.0.0.0/8", // RFC1918 + "172.16.0.0/12", // RFC1918 + "192.168.0.0/16", // RFC1918 + "169.254.0.0/16", // link-local(含云元数据 169.254.169.254) + "100.64.0.0/10", // CGNAT + "0.0.0.0/8", // "this network" + "::1/128", // IPv6 loopback + "fc00::/7", // IPv6 ULA + "fe80::/10", // IPv6 link-local + "::/128", // IPv6 unspecified +}) + +// monitorDialer 共享 Dialer,与 net/http 默认值对齐。 +var monitorDialer = &net.Dialer{ + Timeout: monitorDialTimeout, + KeepAlive: monitorDialKeepAlive, +} + +// mustParseCIDRs 在包初始化时解析 CIDR 字符串,失败 panic。 +func mustParseCIDRs(cidrs []string) []*net.IPNet { + out := make([]*net.IPNet, 0, len(cidrs)) + for _, c := range cidrs { + _, n, err := net.ParseCIDR(c) + if err != nil { + panic("channel_monitor_ssrf: invalid CIDR " + c + ": " + err.Error()) + } + out = append(out, n) + } + return out +} + +// isBlockedHostname 判断 hostname 是否命中黑名单。 +func isBlockedHostname(hostname string) bool { + if hostname == "" { + return true + } + _, blocked := monitorBlockedHostnames[strings.ToLower(hostname)] + return blocked +} + +// isPrivateIP 判断 IP 是否落在禁止段(loopback/RFC1918/link-local/ULA 等)。 +func isPrivateIP(ip net.IP) bool { + if ip 
== nil { + return true + } + if ip.IsUnspecified() || ip.IsLoopback() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() || ip.IsInterfaceLocalMulticast() { + return true + } + for _, n := range monitorBlockedCIDRs { + if n.Contains(ip) { + return true + } + } + return false +} + +// isPrivateOrLoopbackHost 解析 hostname 的所有 A/AAAA 记录, +// 任一 IP 落在私网/loopback 段即认为不安全。 +// +// hostname 是 IP 字面量时也走同一路径。 +func isPrivateOrLoopbackHost(ctx context.Context, hostname string) (bool, error) { + if isBlockedHostname(hostname) { + return true, nil + } + // IP 字面量直接判断。 + if ip := net.ParseIP(hostname); ip != nil { + return isPrivateIP(ip), nil + } + resolver := net.DefaultResolver + addrs, err := resolver.LookupIPAddr(ctx, hostname) + if err != nil { + return false, err + } + if len(addrs) == 0 { + return true, nil + } + for _, a := range addrs { + if isPrivateIP(a.IP) { + return true, nil + } + } + return false, nil +} + +// safeDialContext 在真实 dial 前再次校验目标 IP,防止 DNS rebinding。 +// 解析 hostname 后逐个 IP 尝试连接,命中私网即拒绝(即便 validateEndpoint 时返回的是公网 IP)。 +func safeDialContext(ctx context.Context, network, address string) (net.Conn, error) { + host, port, err := net.SplitHostPort(address) + if err != nil { + return nil, err + } + // 字面量 IP 走快速路径。 + if ip := net.ParseIP(host); ip != nil { + if isPrivateIP(ip) { + return nil, &net.AddrError{Err: "blocked by SSRF policy", Addr: address} + } + return monitorDialer.DialContext(ctx, network, address) + } + if isBlockedHostname(host) { + return nil, &net.AddrError{Err: "blocked by SSRF policy", Addr: address} + } + addrs, err := net.DefaultResolver.LookupIPAddr(ctx, host) + if err != nil { + return nil, err + } + if len(addrs) == 0 { + return nil, &net.AddrError{Err: "no addresses for host", Addr: host} + } + var lastErr error + for _, a := range addrs { + if isPrivateIP(a.IP) { + lastErr = &net.AddrError{Err: "blocked by SSRF policy", Addr: a.IP.String()} + continue + } + conn, err := monitorDialer.DialContext(ctx, network, 
net.JoinHostPort(a.IP.String(), port)) + if err == nil { + return conn, nil + } + lastErr = err + } + if lastErr == nil { + lastErr = &net.AddrError{Err: "no usable addresses", Addr: host} + } + return nil, lastErr +} diff --git a/backend/internal/service/channel_monitor_types.go b/backend/internal/service/channel_monitor_types.go new file mode 100644 index 00000000..4b34d8af --- /dev/null +++ b/backend/internal/service/channel_monitor_types.go @@ -0,0 +1,161 @@ +package service + +import "time" + +// ChannelMonitor 渠道监控配置(service 层模型,不直接暴露 ent 类型)。 +type ChannelMonitor struct { + ID int64 + Name string + Provider string + Endpoint string + APIKey string // 解密后的明文 API Key(仅在 service 内部使用,handler 层不应直接序列化返回) + PrimaryModel string + ExtraModels []string + GroupName string + Enabled bool + IntervalSeconds int + LastCheckedAt *time.Time + CreatedBy int64 + CreatedAt time.Time + UpdatedAt time.Time + + // APIKeyDecryptFailed 表示 APIKey 字段无法解密(密钥不一致或损坏)。 + // 此时 APIKey 为空字符串,runner / RunCheck 必须跳过该监控并提示重填。 + APIKeyDecryptFailed bool +} + +// ChannelMonitorListParams 列表查询过滤参数。 +type ChannelMonitorListParams struct { + Page int + PageSize int + Provider string + Enabled *bool + Search string +} + +// ChannelMonitorCreateParams 创建参数。 +type ChannelMonitorCreateParams struct { + Name string + Provider string + Endpoint string + APIKey string + PrimaryModel string + ExtraModels []string + GroupName string + Enabled bool + IntervalSeconds int + CreatedBy int64 +} + +// ChannelMonitorUpdateParams 更新参数(指针字段表示"未提供则不更新")。 +type ChannelMonitorUpdateParams struct { + Name *string + Provider *string + Endpoint *string + APIKey *string // 空字符串表示不修改;非空字符串覆盖 + PrimaryModel *string + ExtraModels *[]string + GroupName *string + Enabled *bool + IntervalSeconds *int +} + +// CheckResult 单个模型一次检测的结果。 +type CheckResult struct { + Model string + Status string // operational / degraded / failed / error + LatencyMs *int + PingLatencyMs *int + Message string + CheckedAt time.Time +} + +// 
UserMonitorView 用户只读视图:监控概览(含主模型最近状态 + 7d 可用率 + 附加模型最近状态)。 +type UserMonitorView struct { + ID int64 + Name string + Provider string + GroupName string + PrimaryModel string + PrimaryStatus string + PrimaryLatencyMs *int + Availability7d float64 // 0-100 + ExtraModels []ExtraModelStatus +} + +// ExtraModelStatus 附加模型最近一次状态。 +type ExtraModelStatus struct { + Model string + Status string + LatencyMs *int +} + +// UserMonitorDetail 用户只读视图:监控详情(含全部模型 7d/15d/30d 可用率与平均延迟)。 +type UserMonitorDetail struct { + ID int64 + Name string + Provider string + GroupName string + Models []ModelDetail +} + +// ModelDetail 单个模型的可用率/延迟统计。 +type ModelDetail struct { + Model string + LatestStatus string + LatestLatencyMs *int + Availability7d float64 // 0-100 + Availability15d float64 + Availability30d float64 + AvgLatency7dMs *int +} + +// ChannelMonitorHistoryRow 历史记录入库行(service 层向 repository 提交的数据)。 +type ChannelMonitorHistoryRow struct { + MonitorID int64 + Model string + Status string + LatencyMs *int + PingLatencyMs *int + Message string + CheckedAt time.Time +} + +// ChannelMonitorHistoryEntry 历史记录查询返回行(含 ent 主键 ID)。 +type ChannelMonitorHistoryEntry struct { + ID int64 + Model string + Status string + LatencyMs *int + PingLatencyMs *int + Message string + CheckedAt time.Time +} + +// ChannelMonitorLatest 最近一次检测的简明信息(用于 UserMonitorView 聚合)。 +type ChannelMonitorLatest struct { + Model string + Status string + LatencyMs *int + CheckedAt time.Time +} + +// ChannelMonitorAvailability 单个模型在某窗口内的可用率与平均延迟(用于 UserMonitorDetail 聚合)。 +type ChannelMonitorAvailability struct { + Model string + WindowDays int + TotalChecks int + OperationalChecks int // operational + degraded 视为可用 + AvailabilityPct float64 + AvgLatencyMs *int +} + +// MonitorStatusSummary 监控状态聚合(admin list 用,单次 repo 查询消除前端 N+1)。 +// PrimaryStatus / PrimaryLatencyMs 描述主模型最近状态;Availability7d 是主模型 7 天可用率; +// ExtraModels 描述附加模型最近状态(用于 hover 展示)。 +type MonitorStatusSummary struct { + PrimaryStatus string // 空字符串表示无历史 + 
PrimaryLatencyMs *int + Availability7d float64 // 0-100,无历史时为 0 + ExtraModels []ExtraModelStatus +} diff --git a/backend/internal/service/channel_monitor_validate.go b/backend/internal/service/channel_monitor_validate.go new file mode 100644 index 00000000..16bbec71 --- /dev/null +++ b/backend/internal/service/channel_monitor_validate.go @@ -0,0 +1,99 @@ +package service + +import ( + "context" + "net/url" + "strings" +) + +// 渠道监控参数校验与归一化辅助函数。 +// 校验失败一律返回 channel_monitor_const.go 中预定义的 Err* 错误,错误信息不含具体 IP/hostname,避免泄露内网拓扑。 + +// validateProvider 校验 provider 字符串。 +// 唯一来源于 providerAdapters:新增 provider 只需要在 channel_monitor_checker.go 注册 adapter。 +func validateProvider(p string) error { + if !isSupportedProvider(p) { + return ErrChannelMonitorInvalidProvider + } + return nil +} + +// validateInterval 校验 interval_seconds 范围。 +func validateInterval(sec int) error { + if sec < monitorMinIntervalSeconds || sec > monitorMaxIntervalSeconds { + return ErrChannelMonitorInvalidInterval + } + return nil +} + +// validateEndpoint 校验 endpoint: +// - scheme 强制 https(拒绝 http,避免明文凭证 + 部分 SSRF 利用面) +// - 必须为 origin(无 path/query/fragment),防止用户填 https://api.openai.com/v1 +// 导致 joinURL 拼出 /v1/v1/chat/completions +// - hostname 不能是 localhost/metadata 等已知元数据 hostname +// - 解析所有 IP,任一落在 loopback/RFC1918/link-local/ULA 段即拒绝(防 SSRF) +// +// 错误信息不暴露具体 IP / hostname,避免泄露内网拓扑。 +func validateEndpoint(ep string) error { + ep = strings.TrimSpace(ep) + if ep == "" { + return ErrChannelMonitorInvalidEndpoint + } + u, err := url.Parse(ep) + if err != nil { + return ErrChannelMonitorInvalidEndpoint + } + if u.Scheme != "https" { + return ErrChannelMonitorEndpointScheme + } + if u.Host == "" { + return ErrChannelMonitorInvalidEndpoint + } + if u.Path != "" && u.Path != "/" { + return ErrChannelMonitorEndpointPath + } + if u.RawQuery != "" || u.Fragment != "" { + return ErrChannelMonitorEndpointPath + } + + hostname := u.Hostname() + ctx, cancel := context.WithTimeout(context.Background(), 
monitorEndpointResolveTimeout) + defer cancel() + blocked, err := isPrivateOrLoopbackHost(ctx, hostname) + if err != nil { + return ErrChannelMonitorEndpointUnreachable + } + if blocked { + return ErrChannelMonitorEndpointPrivate + } + return nil +} + +// normalizeEndpoint 去除前后空白与末尾 `/`,保证存储统一为 origin。 +// validateEndpoint 已确保格式合法(仅 origin),这里只做最终归一化。 +func normalizeEndpoint(ep string) string { + ep = strings.TrimSpace(ep) + ep = strings.TrimRight(ep, "/") + return ep +} + +// normalizeModels 去除空白、重复模型名。保留输入顺序(map 的迭代顺序无关)。 +func normalizeModels(in []string) []string { + if len(in) == 0 { + return []string{} + } + seen := make(map[string]struct{}, len(in)) + out := make([]string, 0, len(in)) + for _, m := range in { + m = strings.TrimSpace(m) + if m == "" { + continue + } + if _, ok := seen[m]; ok { + continue + } + seen[m] = struct{}{} + out = append(out, m) + } + return out +} diff --git a/backend/internal/service/wire.go b/backend/internal/service/wire.go index 9f33c46a..ce933798 100644 --- a/backend/internal/service/wire.go +++ b/backend/internal/service/wire.go @@ -467,6 +467,8 @@ var ProviderSet = wire.NewSet( NewPaymentService, ProvidePaymentOrderExpiryService, ProvideBalanceNotifyService, + ProvideChannelMonitorService, + ProvideChannelMonitorRunner, ) // ProvidePaymentConfigService wraps NewPaymentConfigService to accept the named @@ -486,3 +488,20 @@ func ProvidePaymentOrderExpiryService(paymentSvc *PaymentService) *PaymentOrderE svc.Start() return svc } + +// ProvideChannelMonitorService 创建渠道监控服务(CRUD + RunCheck + 用户视图聚合)。 +// 加密器复用 wire 中已注入的 SecretEncryptor(AES-256-GCM)。 +func ProvideChannelMonitorService( + repo ChannelMonitorRepository, + encryptor SecretEncryptor, +) *ChannelMonitorService { + return NewChannelMonitorService(repo, encryptor) +} + +// ProvideChannelMonitorRunner 创建并启动渠道监控调度器。 +// Runner.Stop 由 cleanup function 调用。 +func ProvideChannelMonitorRunner(svc *ChannelMonitorService) *ChannelMonitorRunner { + r := NewChannelMonitorRunner(svc) 
+ r.Start() + return r +} diff --git a/backend/migrations/125_add_channel_monitors.sql b/backend/migrations/125_add_channel_monitors.sql new file mode 100644 index 00000000..5ec327da --- /dev/null +++ b/backend/migrations/125_add_channel_monitors.sql @@ -0,0 +1,58 @@ +-- Migration: 125_add_channel_monitors +-- 渠道监控 MVP:周期性对外部 provider/endpoint/api_key 做模型心跳测试。 +-- +-- 表结构说明: +-- - channel_monitors 渠道配置表(一行 = 一个监控对象) +-- - channel_monitor_histories 检测历史明细表(一次检测一个模型 = 一行) +-- +-- 设计要点: +-- - api_key_encrypted 列存放 AES-256-GCM 密文(base64),由 service 层加密。 +-- - extra_models 用 JSONB 存储字符串数组,便于扩展(后续可加权重等元数据)。 +-- - history 表通过 ON DELETE CASCADE 自动清理已删除监控的历史。 +-- - (enabled, last_checked_at) 索引服务于调度器扫描“到期需要检测”的监控。 +-- - histories 上 (monitor_id, model, checked_at DESC) 服务用户视图聚合查询; +-- 单独的 (checked_at) 索引服务定期清理 30 天前数据的 DELETE。 + +CREATE TABLE IF NOT EXISTS channel_monitors ( + id BIGSERIAL PRIMARY KEY, + name VARCHAR(100) NOT NULL, + provider VARCHAR(20) NOT NULL, -- openai / anthropic / gemini + endpoint VARCHAR(500) NOT NULL, -- base origin + api_key_encrypted TEXT NOT NULL, -- AES-256-GCM (base64) + primary_model VARCHAR(200) NOT NULL, + extra_models JSONB NOT NULL DEFAULT '[]'::jsonb, + group_name VARCHAR(100) NOT NULL DEFAULT '', + enabled BOOLEAN NOT NULL DEFAULT TRUE, + interval_seconds INT NOT NULL, + last_checked_at TIMESTAMPTZ, + created_by BIGINT NOT NULL, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + CONSTRAINT channel_monitors_provider_check CHECK (provider IN ('openai', 'anthropic', 'gemini')), + CONSTRAINT channel_monitors_interval_check CHECK (interval_seconds BETWEEN 15 AND 3600) +); + +CREATE INDEX IF NOT EXISTS idx_channel_monitors_enabled_last_checked + ON channel_monitors (enabled, last_checked_at); +CREATE INDEX IF NOT EXISTS idx_channel_monitors_provider + ON channel_monitors (provider); +CREATE INDEX IF NOT EXISTS idx_channel_monitors_group_name + ON channel_monitors (group_name); + +CREATE TABLE IF 
NOT EXISTS channel_monitor_histories ( + id BIGSERIAL PRIMARY KEY, + monitor_id BIGINT NOT NULL REFERENCES channel_monitors(id) ON DELETE CASCADE, + model VARCHAR(200) NOT NULL, + status VARCHAR(20) NOT NULL, + latency_ms INT, + ping_latency_ms INT, + message VARCHAR(500) NOT NULL DEFAULT '', + checked_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + CONSTRAINT channel_monitor_histories_status_check + CHECK (status IN ('operational', 'degraded', 'failed', 'error')) +); + +CREATE INDEX IF NOT EXISTS idx_channel_monitor_histories_monitor_model_checked + ON channel_monitor_histories (monitor_id, model, checked_at DESC); +CREATE INDEX IF NOT EXISTS idx_channel_monitor_histories_checked_at + ON channel_monitor_histories (checked_at); diff --git a/frontend/src/api/admin/channelMonitor.ts b/frontend/src/api/admin/channelMonitor.ts new file mode 100644 index 00000000..d9cc6aed --- /dev/null +++ b/frontend/src/api/admin/channelMonitor.ts @@ -0,0 +1,190 @@ +/** + * Admin Channel Monitor API endpoints + * Handles channel monitor (uptime/health) management for administrators + */ + +import { apiClient } from '../client' + +export type Provider = 'openai' | 'anthropic' | 'gemini' +export type MonitorStatus = 'operational' | 'degraded' | 'failed' | 'error' + +export interface ChannelMonitor { + id: number + name: string + provider: Provider + endpoint: string + api_key_masked: string + /** + * True when the stored encrypted API key cannot be decrypted (e.g. the + * encryption key has changed). Admin must re-edit the monitor to provide + * a fresh key. Backend skips checks for these monitors. 
+ */
+  api_key_decrypt_failed?: boolean
+  primary_model: string
+  extra_models: string[]
+  group_name: string
+  enabled: boolean
+  interval_seconds: number
+  last_checked_at: string | null
+  created_by: number
+  created_at: string
+  updated_at: string
+  /** Latest status of the primary model (empty when no history yet) */
+  primary_status: MonitorStatus | ''
+  /** Latest latency of the primary model in ms (null when no history yet) */
+  primary_latency_ms: number | null
+  /** Primary model 7-day availability percentage (0-100) */
+  availability_7d: number
+  /** Latest status per extra model (used for hover tooltip) */
+  extra_models_status: ExtraModelStatus[]
+}
+
+export interface ExtraModelStatus {
+  model: string
+  status: MonitorStatus | ''
+  latency_ms: number | null
+}
+
+export interface ListParams {
+  page?: number
+  page_size?: number
+  provider?: Provider
+  enabled?: boolean
+  search?: string
+}
+
+export interface ListResponse {
+  items: ChannelMonitor[]
+  total: number
+  page: number
+  page_size: number
+  pages: number
+}
+
+export interface CreateParams {
+  name: string
+  provider: Provider
+  endpoint: string
+  api_key: string
+  primary_model: string
+  extra_models?: string[]
+  group_name?: string
+  enabled?: boolean
+  interval_seconds: number
+}
+
+// Update request: api_key empty string means "do not modify"
+export type UpdateParams = Partial<CreateParams>
+
+export interface CheckResult {
+  model: string
+  status: MonitorStatus
+  latency_ms: number | null
+  ping_latency_ms: number | null
+  message: string
+  checked_at: string
+}
+
+export interface RunNowResponse {
+  results: CheckResult[]
+}
+
+export interface HistoryItem {
+  id: number
+  model: string
+  status: MonitorStatus
+  latency_ms: number | null
+  ping_latency_ms: number | null
+  message: string
+  checked_at: string
+}
+
+export interface HistoryParams {
+  model?: string
+  limit?: number
+}
+
+export interface HistoryResponse {
+  items: HistoryItem[]
+}
+
+/**
+ * List channel monitors with pagination
and filters
+ */
+export async function list(
+  params: ListParams = {},
+  options?: { signal?: AbortSignal }
+): Promise<ListResponse> {
+  const { data } = await apiClient.get<ListResponse>('/admin/channel-monitors', {
+    params,
+    signal: options?.signal,
+  })
+  return data
+}
+
+/**
+ * Get a channel monitor by ID
+ */
+export async function get(id: number): Promise<ChannelMonitor> {
+  const { data } = await apiClient.get<ChannelMonitor>(`/admin/channel-monitors/${id}`)
+  return data
+}
+
+/**
+ * Create a new channel monitor
+ */
+export async function create(params: CreateParams): Promise<ChannelMonitor> {
+  const { data } = await apiClient.post<ChannelMonitor>('/admin/channel-monitors', params)
+  return data
+}
+
+/**
+ * Update an existing channel monitor.
+ * api_key field: empty string means "do not modify".
+ */
+export async function update(id: number, params: UpdateParams): Promise<ChannelMonitor> {
+  const { data } = await apiClient.put<ChannelMonitor>(`/admin/channel-monitors/${id}`, params)
+  return data
+}
+
+/**
+ * Delete a channel monitor
+ */
+export async function del(id: number): Promise<void> {
+  await apiClient.delete(`/admin/channel-monitors/${id}`)
+}
+
+/**
+ * Trigger an immediate manual check for a channel monitor.
+ * Returns the latest check results for primary + extra models.
+ */
+export async function runNow(id: number): Promise<RunNowResponse> {
+  const { data } = await apiClient.post<RunNowResponse>(`/admin/channel-monitors/${id}/run`)
+  return data
+}
+
+/**
+ * List historical check results for a monitor.
+ */ +export async function listHistory( + id: number, + params: HistoryParams = {} +): Promise { + const { data } = await apiClient.get( + `/admin/channel-monitors/${id}/history`, + { params } + ) + return data +} + +export const channelMonitorAPI = { + list, + get, + create, + update, + del, + runNow, + listHistory, +} + +export default channelMonitorAPI diff --git a/frontend/src/api/admin/index.ts b/frontend/src/api/admin/index.ts index 72597365..5e2a9959 100644 --- a/frontend/src/api/admin/index.ts +++ b/frontend/src/api/admin/index.ts @@ -26,6 +26,7 @@ import scheduledTestsAPI from './scheduledTests' import backupAPI from './backup' import tlsFingerprintProfileAPI from './tlsFingerprintProfile' import channelsAPI from './channels' +import channelMonitorAPI from './channelMonitor' import adminPaymentAPI from './payment' /** @@ -55,6 +56,7 @@ export const adminAPI = { backup: backupAPI, tlsFingerprintProfiles: tlsFingerprintProfileAPI, channels: channelsAPI, + channelMonitor: channelMonitorAPI, payment: adminPaymentAPI } @@ -82,6 +84,7 @@ export { backupAPI, tlsFingerprintProfileAPI, channelsAPI, + channelMonitorAPI, adminPaymentAPI } diff --git a/frontend/src/api/channelMonitor.ts b/frontend/src/api/channelMonitor.ts new file mode 100644 index 00000000..c5481636 --- /dev/null +++ b/frontend/src/api/channelMonitor.ts @@ -0,0 +1,74 @@ +/** + * User-facing Channel Monitor API endpoints + * Read-only views for end users to inspect channel availability/status. 
+ */ + +import { apiClient } from './client' +import type { Provider, MonitorStatus } from './admin/channelMonitor' + +export type { Provider, MonitorStatus } from './admin/channelMonitor' + +export interface UserMonitorExtraModel { + model: string + status: MonitorStatus + latency_ms: number | null +} + +export interface UserMonitorView { + id: number + name: string + provider: Provider + group_name: string + primary_model: string + primary_status: MonitorStatus + primary_latency_ms: number | null + availability_7d: number + extra_models: UserMonitorExtraModel[] +} + +export interface UserMonitorListResponse { + items: UserMonitorView[] +} + +export interface UserMonitorModelDetail { + model: string + latest_status: MonitorStatus + latest_latency_ms: number | null + availability_7d: number + availability_15d: number + availability_30d: number + avg_latency_7d_ms: number | null +} + +export interface UserMonitorDetail { + id: number + name: string + provider: Provider + group_name: string + models: UserMonitorModelDetail[] +} + +/** + * List all monitor views available to the current user. + */ +export async function list(options?: { signal?: AbortSignal }): Promise { + const { data } = await apiClient.get('/channel-monitors', { + signal: options?.signal, + }) + return data +} + +/** + * Get detailed status (multi-window availability + latency) for a single monitor. 
+ */ +export async function status(id: number): Promise { + const { data } = await apiClient.get(`/channel-monitors/${id}/status`) + return data +} + +export const channelMonitorUserAPI = { + list, + status, +} + +export default channelMonitorUserAPI diff --git a/frontend/src/api/index.ts b/frontend/src/api/index.ts index 6b3ef174..dd005a0d 100644 --- a/frontend/src/api/index.ts +++ b/frontend/src/api/index.ts @@ -18,6 +18,7 @@ export { paymentAPI } from './payment' export { userGroupsAPI } from './groups' export { totpAPI } from './totp' export { default as announcementsAPI } from './announcements' +export { channelMonitorUserAPI } from './channelMonitor' // Admin APIs export { adminAPI } from './admin' diff --git a/frontend/src/components/admin/monitor/MonitorActionsCell.vue b/frontend/src/components/admin/monitor/MonitorActionsCell.vue new file mode 100644 index 00000000..74aa4017 --- /dev/null +++ b/frontend/src/components/admin/monitor/MonitorActionsCell.vue @@ -0,0 +1,45 @@ + + + diff --git a/frontend/src/components/admin/monitor/MonitorFiltersBar.vue b/frontend/src/components/admin/monitor/MonitorFiltersBar.vue new file mode 100644 index 00000000..ebb06a68 --- /dev/null +++ b/frontend/src/components/admin/monitor/MonitorFiltersBar.vue @@ -0,0 +1,95 @@ + + + diff --git a/frontend/src/components/admin/monitor/MonitorFormDialog.vue b/frontend/src/components/admin/monitor/MonitorFormDialog.vue new file mode 100644 index 00000000..920c3f79 --- /dev/null +++ b/frontend/src/components/admin/monitor/MonitorFormDialog.vue @@ -0,0 +1,297 @@ + + + diff --git a/frontend/src/components/admin/monitor/MonitorKeyPickerDialog.vue b/frontend/src/components/admin/monitor/MonitorKeyPickerDialog.vue new file mode 100644 index 00000000..eefe4073 --- /dev/null +++ b/frontend/src/components/admin/monitor/MonitorKeyPickerDialog.vue @@ -0,0 +1,64 @@ + + + diff --git a/frontend/src/components/admin/monitor/MonitorPrimaryModelCell.vue 
b/frontend/src/components/admin/monitor/MonitorPrimaryModelCell.vue new file mode 100644 index 00000000..eccec828 --- /dev/null +++ b/frontend/src/components/admin/monitor/MonitorPrimaryModelCell.vue @@ -0,0 +1,71 @@ + + + diff --git a/frontend/src/components/admin/monitor/MonitorRunResultDialog.vue b/frontend/src/components/admin/monitor/MonitorRunResultDialog.vue new file mode 100644 index 00000000..02fa6e8d --- /dev/null +++ b/frontend/src/components/admin/monitor/MonitorRunResultDialog.vue @@ -0,0 +1,56 @@ + + + diff --git a/frontend/src/components/layout/AppSidebar.vue b/frontend/src/components/layout/AppSidebar.vue index 92dcc519..23d0f4e9 100644 --- a/frontend/src/components/layout/AppSidebar.vue +++ b/frontend/src/components/layout/AppSidebar.vue @@ -38,7 +38,7 @@ 'sidebar-link-collapsed': sidebarCollapsed }" :title="sidebarCollapsed ? item.label : undefined" - @click="sidebarCollapsed ? undefined : toggleGroup(item)" + @click="handleGroupClick(item)" > import { computed, h, onMounted, ref, watch } from 'vue' -import { useRoute } from 'vue-router' +import { useRoute, useRouter } from 'vue-router' import { useI18n } from 'vue-i18n' import { useAdminSettingsStore, useAppStore, useAuthStore, useOnboardingStore } from '@/stores' import VersionBadge from '@/components/common/VersionBadge.vue' @@ -194,11 +194,17 @@ interface NavItem { iconSvg?: string hideInSimpleMode?: boolean children?: NavItem[] + /** + * When true, the parent item only toggles the expand/collapse state and + * does NOT navigate to its `path`. The `path` is purely a stable key. 
+ */ + expandOnly?: boolean } const { t } = useI18n() const route = useRoute() +const router = useRouter() const appStore = useAppStore() const authStore = useAuthStore() const onboardingStore = useOnboardingStore() @@ -549,6 +555,41 @@ const ChevronDoubleRightIcon = { ) } +const SignalIcon = { + render: () => + h( + 'svg', + { fill: 'none', viewBox: '0 0 24 24', stroke: 'currentColor', 'stroke-width': '1.5' }, + [ + h('path', { + 'stroke-linecap': 'round', + 'stroke-linejoin': 'round', + d: 'M9.348 14.651a3.75 3.75 0 010-5.303m5.304 0a3.75 3.75 0 010 5.303m-7.425 2.122a6.75 6.75 0 010-9.546m9.546 0a6.75 6.75 0 010 9.546M5.106 18.894c-3.808-3.807-3.808-9.98 0-13.788m13.788 0c3.808 3.807 3.808 9.98 0 13.788M12 12h.008v.008H12V12zm.375 0a.375.375 0 11-.75 0 .375.375 0 01.75 0z' + }) + ] + ) +} + +const PriceTagIcon = { + render: () => + h( + 'svg', + { fill: 'none', viewBox: '0 0 24 24', stroke: 'currentColor', 'stroke-width': '1.5' }, + [ + h('path', { + 'stroke-linecap': 'round', + 'stroke-linejoin': 'round', + d: 'M9.568 3H5.25A2.25 2.25 0 003 5.25v4.318c0 .597.237 1.17.659 1.591l9.581 9.581c.699.699 1.78.872 2.607.33a18.095 18.095 0 005.223-5.223c.542-.827.369-1.908-.33-2.607L11.16 3.66A2.25 2.25 0 009.568 3z' + }), + h('path', { + 'stroke-linecap': 'round', + 'stroke-linejoin': 'round', + d: 'M6 6h.008v.008H6V6z' + }) + ] + ) +} + const ChevronDownIcon = { render: () => h( @@ -570,6 +611,7 @@ const userNavItems = computed((): NavItem[] => { { path: '/dashboard', label: t('nav.dashboard'), icon: DashboardIcon }, { path: '/keys', label: t('nav.apiKeys'), icon: KeyIcon }, { path: '/usage', label: t('nav.usage'), icon: ChartIcon, hideInSimpleMode: true }, + { path: '/monitor', label: t('nav.channelStatus'), icon: SignalIcon }, { path: '/subscriptions', label: t('nav.mySubscriptions'), icon: CreditCardIcon, hideInSimpleMode: true }, ...(appStore.cachedPublicSettings?.payment_enabled ? 
[ @@ -608,6 +650,7 @@ const personalNavItems = computed((): NavItem[] => { const items: NavItem[] = [ { path: '/keys', label: t('nav.apiKeys'), icon: KeyIcon }, { path: '/usage', label: t('nav.usage'), icon: ChartIcon, hideInSimpleMode: true }, + { path: '/monitor', label: t('nav.channelStatus'), icon: SignalIcon }, { path: '/subscriptions', label: t('nav.mySubscriptions'), icon: CreditCardIcon, hideInSimpleMode: true }, ...(appStore.cachedPublicSettings?.payment_enabled ? [ @@ -664,7 +707,17 @@ const adminNavItems = computed((): NavItem[] => { : []), { path: '/admin/users', label: t('nav.users'), icon: UsersIcon, hideInSimpleMode: true }, { path: '/admin/groups', label: t('nav.groups'), icon: FolderIcon, hideInSimpleMode: true }, - { path: '/admin/channels', label: t('nav.channels', '渠道管理'), icon: ChannelIcon, hideInSimpleMode: true }, + { + path: '/admin/channels', + label: t('nav.channelManagement'), + icon: ChannelIcon, + hideInSimpleMode: true, + expandOnly: true, + children: [ + { path: '/admin/channels/pricing', label: t('nav.channelPricing'), icon: PriceTagIcon }, + { path: '/admin/channels/monitor', label: t('nav.channelMonitor'), icon: SignalIcon }, + ], + }, { path: '/admin/subscriptions', label: t('nav.subscriptions'), icon: CreditCardIcon, hideInSimpleMode: true }, { path: '/admin/accounts', label: t('nav.accounts'), icon: GlobeIcon }, { path: '/admin/announcements', label: t('nav.announcements'), icon: BellIcon }, @@ -678,6 +731,7 @@ const adminNavItems = computed((): NavItem[] => { label: t('nav.orderManagement'), icon: OrderIcon, hideInSimpleMode: true, + expandOnly: true, children: [ { path: '/admin/orders/dashboard', label: t('nav.paymentDashboard'), icon: ChartIcon }, { path: '/admin/orders', label: t('nav.orderManagement'), icon: OrderIcon }, @@ -764,6 +818,28 @@ function toggleGroup(item: NavItem) { } } +/** + * Click handler for collapsible parent items. + * - When sidebar is collapsed: do nothing (children are not visible). 
+ * - When `expandOnly` is true: only toggle expand state. + * - Otherwise (default, e.g. /admin/orders): navigate to the parent path + * (router-link semantics) and ensure the group is expanded. + */ +function handleGroupClick(item: NavItem) { + if (sidebarCollapsed.value) return + if (item.expandOnly) { + toggleGroup(item) + return + } + // Push to path and ensure expanded + if (route.path !== item.path) { + router.push(item.path) + } + if (!expandedGroups.value.has(item.path)) { + expandedGroups.value.add(item.path) + } +} + // Initialize theme const savedTheme = localStorage.getItem('theme') if ( diff --git a/frontend/src/components/user/MonitorDetailDialog.vue b/frontend/src/components/user/MonitorDetailDialog.vue new file mode 100644 index 00000000..564f461b --- /dev/null +++ b/frontend/src/components/user/MonitorDetailDialog.vue @@ -0,0 +1,114 @@ + + + diff --git a/frontend/src/components/user/MonitorPrimaryModelCell.vue b/frontend/src/components/user/MonitorPrimaryModelCell.vue new file mode 100644 index 00000000..32620b2a --- /dev/null +++ b/frontend/src/components/user/MonitorPrimaryModelCell.vue @@ -0,0 +1,71 @@ + + + diff --git a/frontend/src/composables/useChannelMonitorFormat.ts b/frontend/src/composables/useChannelMonitorFormat.ts new file mode 100644 index 00000000..fbb310fa --- /dev/null +++ b/frontend/src/composables/useChannelMonitorFormat.ts @@ -0,0 +1,97 @@ +/** + * Shared formatting helpers for channel monitor views (admin + user). + * + * Centralises: + * - status / provider label + badge class lookups + * - latency / availability / percent number formatting + * + * i18n keys live under `monitorCommon.*` so admin and user views share the + * same translation source. 
+ */ + +import { useI18n } from 'vue-i18n' +import type { MonitorStatus, Provider } from '@/api/admin/channelMonitor' +import { + PROVIDER_OPENAI, + PROVIDER_ANTHROPIC, + PROVIDER_GEMINI, + STATUS_OPERATIONAL, + STATUS_DEGRADED, + STATUS_FAILED, + STATUS_ERROR, +} from '@/constants/channelMonitor' + +const NEUTRAL_BADGE = 'bg-gray-100 text-gray-800 dark:bg-dark-700 dark:text-gray-300' + +export interface AvailabilityRow { + primary_status: MonitorStatus | '' + availability_7d: number | null | undefined +} + +export function useChannelMonitorFormat() { + const { t } = useI18n() + + function statusLabel(s: MonitorStatus | ''): string { + if (!s) return t('monitorCommon.status.unknown') + return t(`monitorCommon.status.${s}`) + } + + function statusBadgeClass(s: MonitorStatus | ''): string { + switch (s) { + case STATUS_OPERATIONAL: + return 'bg-green-100 text-green-800 dark:bg-green-900/30 dark:text-green-300' + case STATUS_DEGRADED: + return 'bg-yellow-100 text-yellow-800 dark:bg-yellow-900/30 dark:text-yellow-300' + case STATUS_FAILED: + return 'bg-red-100 text-red-800 dark:bg-red-900/30 dark:text-red-300' + case STATUS_ERROR: + default: + return NEUTRAL_BADGE + } + } + + function providerLabel(p: Provider | string): string { + if (p === PROVIDER_OPENAI || p === PROVIDER_ANTHROPIC || p === PROVIDER_GEMINI) { + return t(`monitorCommon.providers.${p}`) + } + return p || '-' + } + + function providerBadgeClass(p: Provider | string): string { + switch (p) { + case PROVIDER_OPENAI: + return 'bg-green-100 text-green-800 dark:bg-green-900/30 dark:text-green-300' + case PROVIDER_ANTHROPIC: + return 'bg-orange-100 text-orange-800 dark:bg-orange-900/30 dark:text-orange-300' + case PROVIDER_GEMINI: + return 'bg-blue-100 text-blue-800 dark:bg-blue-900/30 dark:text-blue-300' + default: + return NEUTRAL_BADGE + } + } + + function formatLatency(ms: number | null | undefined): string { + if (ms == null) return t('monitorCommon.latencyEmpty') + return String(Math.round(ms)) + } + + 
function formatPercent(v: number | null | undefined): string { + if (v == null || Number.isNaN(v)) return '-' + return `${v.toFixed(2)}%` + } + + function formatAvailability(row: AvailabilityRow): string { + if (!row.primary_status) return '-' + return formatPercent(row.availability_7d) + } + + return { + statusLabel, + statusBadgeClass, + providerLabel, + providerBadgeClass, + formatLatency, + formatPercent, + formatAvailability, + } +} diff --git a/frontend/src/constants/channelMonitor.ts b/frontend/src/constants/channelMonitor.ts new file mode 100644 index 00000000..7523a878 --- /dev/null +++ b/frontend/src/constants/channelMonitor.ts @@ -0,0 +1,35 @@ +/** + * Channel monitor shared constants. + * + * Single source of truth for provider/status string values used by both the + * admin (`views/admin/ChannelMonitorView.vue`) and user-facing + * (`views/user/ChannelStatusView.vue`) screens, plus the shared composable + * `useChannelMonitorFormat`. + */ + +import type { Provider, MonitorStatus } from '@/api/admin/channelMonitor' + +export const PROVIDER_OPENAI: Provider = 'openai' +export const PROVIDER_ANTHROPIC: Provider = 'anthropic' +export const PROVIDER_GEMINI: Provider = 'gemini' + +export const PROVIDERS: readonly Provider[] = [ + PROVIDER_OPENAI, + PROVIDER_ANTHROPIC, + PROVIDER_GEMINI, +] + +export const STATUS_OPERATIONAL: MonitorStatus = 'operational' +export const STATUS_DEGRADED: MonitorStatus = 'degraded' +export const STATUS_FAILED: MonitorStatus = 'failed' +export const STATUS_ERROR: MonitorStatus = 'error' + +export const MONITOR_STATUSES: readonly MonitorStatus[] = [ + STATUS_OPERATIONAL, + STATUS_DEGRADED, + STATUS_FAILED, + STATUS_ERROR, +] + +/** Default polling interval (seconds) for new monitors. 
*/ +export const DEFAULT_INTERVAL_SECONDS = 60 diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts index 1b7ffa81..32fbce19 100644 --- a/frontend/src/i18n/locales/en.ts +++ b/frontend/src/i18n/locales/en.ts @@ -245,6 +245,7 @@ export default { // Common common: { loading: 'Loading...', + submitting: 'Submitting...', justNow: 'just now', save: 'Save', saved: 'Saved successfully', @@ -363,7 +364,11 @@ export default { orderManagement: 'Orders', paymentDashboard: 'Payment Dashboard', paymentConfig: 'Payment Config', - paymentPlans: 'Plans' + paymentPlans: 'Plans', + channelManagement: 'Channels', + channelPricing: 'Channel Pricing', + channelMonitor: 'Channel Monitor', + channelStatus: 'Channel Status', }, // Auth @@ -846,6 +851,58 @@ export default { userAgent: 'User-Agent' }, + // Shared keys for channel monitor (admin + user views) + monitorCommon: { + status: { + operational: 'Operational', + degraded: 'Degraded', + failed: 'Failed', + error: 'Error', + unknown: '-' + }, + providers: { + openai: 'OpenAI', + anthropic: 'Anthropic', + gemini: 'Gemini' + }, + extraModelsHeader: 'Extra Models', + extraModelsEmpty: 'No extra models', + latencyEmpty: '-' + }, + + // Channel Status (user-facing read-only view) + channelStatus: { + title: 'Channel Status', + description: 'Inspect channel availability, latency and recent status', + searchPlaceholder: 'Search channels...', + allProviders: 'All Providers', + loadError: 'Failed to load channel status', + detailLoadError: 'Failed to load channel detail', + detailTitle: 'Channel Detail', + closeDetail: 'Close', + columns: { + name: 'Name', + provider: 'Provider', + groupName: 'Group', + primaryModel: 'Primary Model', + availability7d: '7d Availability', + latency: 'Latency (ms)' + }, + detailColumns: { + model: 'Model', + latestStatus: 'Latest Status', + latestLatency: 'Latest Latency (ms)', + availability7d: '7d Availability', + availability15d: '15d Availability', + availability30d: '30d 
Availability', + avgLatency7d: '7d Avg Latency (ms)' + }, + empty: { + title: 'No channels available', + description: 'No monitored channels have been configured yet.' + } + }, + // Redeem redeem: { title: 'Redeem Code', @@ -2014,6 +2071,69 @@ export default { } }, + // Channel Monitor + channelMonitor: { + title: 'Channel Monitor', + description: 'Monitor channel availability, latency and status', + searchPlaceholder: 'Search monitor name...', + allProviders: 'All Providers', + allStatus: 'All Status', + enabledFilter: 'Enabled', + onlyEnabled: 'Enabled only', + onlyDisabled: 'Disabled only', + createButton: 'Create Monitor', + createTitle: 'Create Channel Monitor', + editTitle: 'Edit Channel Monitor', + runNow: 'Run Now', + runSuccess: 'Check completed', + runFailed: 'Check failed', + apiKeyDecryptFailed: 'API Key decryption failed. Please re-edit this monitor with a fresh key.', + createSuccess: 'Monitor created', + updateSuccess: 'Monitor updated', + deleteSuccess: 'Monitor deleted', + loadError: 'Failed to load monitors', + deleteConfirm: 'Are you sure you want to delete monitor "{name}"? 
This action cannot be undone.', + nameRequired: 'Please enter a monitor name', + primaryModelRequired: 'Please enter a primary model', + columns: { + name: 'Name', + provider: 'Provider', + primaryModel: 'Primary Model', + availability7d: '7d Availability', + latency: 'Latency (ms)', + enabled: 'Enabled', + actions: 'Actions' + }, + form: { + name: 'Name', + namePlaceholder: 'Enter monitor name', + provider: 'Provider', + endpoint: 'Endpoint', + endpointPlaceholder: 'https://api.example.com', + useCurrentDomain: 'Use current service', + apiKey: 'API Key', + apiKeyPlaceholder: 'Enter API Key', + apiKeyEditPlaceholder: 'Leave blank to keep current key', + useMyKey: 'Use my key', + selectKeyTitle: 'Select my API Key', + selectKeyHint: 'Only your active, non-expired keys are listed.', + noActiveKey: 'No active API keys available', + primaryModel: 'Primary Model', + primaryModelPlaceholder: 'gpt-4o-mini', + extraModels: 'Extra Models', + extraModelsPlaceholder: 'Press Enter to add extra model', + groupName: 'Group Name', + groupNamePlaceholder: 'Optional, used to group rows in user view', + intervalSeconds: 'Interval (seconds)', + intervalSecondsHint: 'Range: 15 - 3600 seconds', + enabled: 'Enable monitor', + kindRequired: 'Please select a provider' + }, + runResultTitle: 'Check Result', + noMonitorsYet: 'No monitors yet', + createFirstMonitor: 'Create your first monitor to track channel availability' + }, + // Subscriptions subscriptions: { title: 'Subscription Management', diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts index beb6841f..dd3af363 100644 --- a/frontend/src/i18n/locales/zh.ts +++ b/frontend/src/i18n/locales/zh.ts @@ -245,6 +245,7 @@ export default { // Common common: { loading: '加载中...', + submitting: '提交中...', justNow: '刚刚', save: '保存', saved: '保存成功', @@ -363,7 +364,11 @@ export default { orderManagement: '订单管理', paymentDashboard: '支付概览', paymentConfig: '支付配置', - paymentPlans: '订阅套餐' + paymentPlans: '订阅套餐', + 
channelManagement: '渠道管理', + channelPricing: '渠道定价', + channelMonitor: '渠道监控', + channelStatus: '渠道状态', }, // Auth @@ -850,6 +855,58 @@ export default { userAgent: 'User-Agent' }, + // Shared keys for channel monitor (admin + user views) + monitorCommon: { + status: { + operational: '正常', + degraded: '降级', + failed: '失败', + error: '错误', + unknown: '-' + }, + providers: { + openai: 'OpenAI', + anthropic: 'Anthropic', + gemini: 'Gemini' + }, + extraModelsHeader: '附加模型', + extraModelsEmpty: '无附加模型', + latencyEmpty: '-' + }, + + // Channel Status (user-facing read-only view) + channelStatus: { + title: '渠道状态', + description: '查看渠道可用性、延迟和近期状态', + searchPlaceholder: '搜索渠道...', + allProviders: '全部供应商', + loadError: '加载渠道状态失败', + detailLoadError: '加载渠道详情失败', + detailTitle: '渠道详情', + closeDetail: '关闭', + columns: { + name: '名称', + provider: '供应商', + groupName: '分组', + primaryModel: '主模型', + availability7d: '7 天可用率', + latency: '延迟 (ms)' + }, + detailColumns: { + model: '模型', + latestStatus: '最新状态', + latestLatency: '最新延迟 (ms)', + availability7d: '7 天可用率', + availability15d: '15 天可用率', + availability30d: '30 天可用率', + avgLatency7d: '7 天平均延迟 (ms)' + }, + empty: { + title: '暂无可显示的渠道', + description: '管理员尚未配置可监控的渠道。' + } + }, + // Redeem redeem: { title: '兑换码', @@ -2093,6 +2150,69 @@ export default { } }, + // Channel Monitor + channelMonitor: { + title: '渠道监控', + description: '监测各渠道的可用性、延迟和状态', + searchPlaceholder: '搜索监控名称...', + allProviders: '全部供应商', + allStatus: '全部状态', + enabledFilter: '启用状态', + onlyEnabled: '仅启用', + onlyDisabled: '仅禁用', + createButton: '新增监控', + createTitle: '新增渠道监控', + editTitle: '编辑渠道监控', + runNow: '立即检测', + runSuccess: '检测完成', + runFailed: '检测失败', + apiKeyDecryptFailed: 'API Key 解密失败,请重新编辑该监控并填入新的 Key', + createSuccess: '监控创建成功', + updateSuccess: '监控更新成功', + deleteSuccess: '监控删除成功', + loadError: '加载监控列表失败', + deleteConfirm: '确定要删除监控「{name}」吗?此操作不可撤销。', + nameRequired: '请输入监控名称', + primaryModelRequired: '请输入主模型', + columns: { + name: '名称', + provider: 
'供应商', + primaryModel: '主模型', + availability7d: '7 天可用率', + latency: '延迟 (ms)', + enabled: '启用', + actions: '操作' + }, + form: { + name: '名称', + namePlaceholder: '输入监控名称', + provider: '供应商', + endpoint: '上游地址', + endpointPlaceholder: 'https://api.example.com', + useCurrentDomain: '使用当前服务', + apiKey: 'API Key', + apiKeyPlaceholder: '请输入 API Key', + apiKeyEditPlaceholder: '留空表示不修改', + useMyKey: '使用我的 Key', + selectKeyTitle: '选择我的 API Key', + selectKeyHint: '仅显示当前账号下处于「启用」状态且未过期的 Key。', + noActiveKey: '没有可用的启用状态 Key', + primaryModel: '主模型', + primaryModelPlaceholder: 'gpt-4o-mini', + extraModels: '附加模型', + extraModelsPlaceholder: '回车添加附加模型', + groupName: '分组名称', + groupNamePlaceholder: '可选,用于在用户视图中聚合显示', + intervalSeconds: '检测间隔 (秒)', + intervalSecondsHint: '范围:15 - 3600 秒', + enabled: '启用监控', + kindRequired: '请选择供应商' + }, + runResultTitle: '检测结果', + noMonitorsYet: '暂无监控', + createFirstMonitor: '创建第一个监控来跟踪渠道可用性' + }, + // Subscriptions Management subscriptions: { title: '订阅管理', diff --git a/frontend/src/router/index.ts b/frontend/src/router/index.ts index b97ccb5d..491a984d 100644 --- a/frontend/src/router/index.ts +++ b/frontend/src/router/index.ts @@ -360,6 +360,10 @@ const routes: RouteRecordRaw[] = [ }, { path: '/admin/channels', + redirect: '/admin/channels/pricing' + }, + { + path: '/admin/channels/pricing', name: 'AdminChannels', component: () => import('@/views/admin/ChannelsView.vue'), meta: { @@ -370,6 +374,29 @@ const routes: RouteRecordRaw[] = [ descriptionKey: 'admin.channels.description' } }, + { + path: '/admin/channels/monitor', + name: 'AdminChannelMonitor', + component: () => import('@/views/admin/ChannelMonitorView.vue'), + meta: { + requiresAuth: true, + requiresAdmin: true, + title: 'Channel Monitor', + titleKey: 'admin.channelMonitor.title', + descriptionKey: 'admin.channelMonitor.description' + } + }, + { + path: '/monitor', + name: 'ChannelStatus', + component: () => import('@/views/user/ChannelStatusView.vue'), + meta: { + requiresAuth: true, + 
requiresAdmin: false, + title: 'Channel Status', + titleKey: 'nav.channelStatus' + } + }, { path: '/admin/subscriptions', name: 'AdminSubscriptions', diff --git a/frontend/src/views/admin/ChannelMonitorView.vue b/frontend/src/views/admin/ChannelMonitorView.vue new file mode 100644 index 00000000..8f0a1e2f --- /dev/null +++ b/frontend/src/views/admin/ChannelMonitorView.vue @@ -0,0 +1,295 @@ + + + diff --git a/frontend/src/views/user/ChannelStatusView.vue b/frontend/src/views/user/ChannelStatusView.vue new file mode 100644 index 00000000..9f5fe8d1 --- /dev/null +++ b/frontend/src/views/user/ChannelStatusView.vue @@ -0,0 +1,208 @@ + + +