diff --git a/backend/cmd/server/wire_gen.go b/backend/cmd/server/wire_gen.go index 754f814a..a878ea68 100644 --- a/backend/cmd/server/wire_gen.go +++ b/backend/cmd/server/wire_gen.go @@ -252,7 +252,7 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { opsMetricsCollector := service.ProvideOpsMetricsCollector(opsRepository, settingRepository, accountRepository, concurrencyService, db, redisClient, configConfig) opsAggregationService := service.ProvideOpsAggregationService(opsRepository, settingRepository, db, redisClient, configConfig) opsAlertEvaluatorService := service.ProvideOpsAlertEvaluatorService(opsService, opsRepository, emailService, redisClient, configConfig) - opsCleanupService := service.ProvideOpsCleanupService(opsRepository, db, redisClient, configConfig) + opsCleanupService := service.ProvideOpsCleanupService(opsRepository, db, redisClient, configConfig, channelMonitorService) opsScheduledReportService := service.ProvideOpsScheduledReportService(opsService, userService, emailService, redisClient, configConfig) tokenRefreshService := service.ProvideTokenRefreshService(accountRepository, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, compositeTokenCacheInvalidator, schedulerCache, configConfig, tempUnschedCache, privacyClientFactory, proxyRepository, oAuthRefreshAPI) accountExpiryService := service.ProvideAccountExpiryService(accountRepository) diff --git a/backend/ent/channelmonitor.go b/backend/ent/channelmonitor.go index 292c2b28..58886884 100644 --- a/backend/ent/channelmonitor.go +++ b/backend/ent/channelmonitor.go @@ -54,9 +54,11 @@ type ChannelMonitor struct { type ChannelMonitorEdges struct { // History holds the value of the history edge. History []*ChannelMonitorHistory `json:"history,omitempty"` + // DailyRollups holds the value of the daily_rollups edge. 
+ DailyRollups []*ChannelMonitorDailyRollup `json:"daily_rollups,omitempty"` // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. - loadedTypes [1]bool + loadedTypes [2]bool } // HistoryOrErr returns the History value or an error if the edge @@ -68,6 +70,15 @@ func (e ChannelMonitorEdges) HistoryOrErr() ([]*ChannelMonitorHistory, error) { return nil, &NotLoadedError{edge: "history"} } +// DailyRollupsOrErr returns the DailyRollups value or an error if the edge +// was not loaded in eager-loading. +func (e ChannelMonitorEdges) DailyRollupsOrErr() ([]*ChannelMonitorDailyRollup, error) { + if e.loadedTypes[1] { + return e.DailyRollups, nil + } + return nil, &NotLoadedError{edge: "daily_rollups"} +} + // scanValues returns the types for scanning values from sql.Rows. func (*ChannelMonitor) scanValues(columns []string) ([]any, error) { values := make([]any, len(columns)) @@ -203,6 +214,11 @@ func (_m *ChannelMonitor) QueryHistory() *ChannelMonitorHistoryQuery { return NewChannelMonitorClient(_m.config).QueryHistory(_m) } +// QueryDailyRollups queries the "daily_rollups" edge of the ChannelMonitor entity. +func (_m *ChannelMonitor) QueryDailyRollups() *ChannelMonitorDailyRollupQuery { + return NewChannelMonitorClient(_m.config).QueryDailyRollups(_m) +} + // Update returns a builder for updating this ChannelMonitor. // Note that you need to call ChannelMonitor.Unwrap() before calling this method if this ChannelMonitor // was returned from a transaction, and the transaction was committed or rolled back. diff --git a/backend/ent/channelmonitor/channelmonitor.go b/backend/ent/channelmonitor/channelmonitor.go index c5ab8199..ff6d7105 100644 --- a/backend/ent/channelmonitor/channelmonitor.go +++ b/backend/ent/channelmonitor/channelmonitor.go @@ -43,6 +43,8 @@ const ( FieldCreatedBy = "created_by" // EdgeHistory holds the string denoting the history edge name in mutations. 
EdgeHistory = "history" + // EdgeDailyRollups holds the string denoting the daily_rollups edge name in mutations. + EdgeDailyRollups = "daily_rollups" // Table holds the table name of the channelmonitor in the database. Table = "channel_monitors" // HistoryTable is the table that holds the history relation/edge. @@ -52,6 +54,13 @@ const ( HistoryInverseTable = "channel_monitor_histories" // HistoryColumn is the table column denoting the history relation/edge. HistoryColumn = "monitor_id" + // DailyRollupsTable is the table that holds the daily_rollups relation/edge. + DailyRollupsTable = "channel_monitor_daily_rollups" + // DailyRollupsInverseTable is the table name for the ChannelMonitorDailyRollup entity. + // It exists in this package in order to avoid circular dependency with the "channelmonitordailyrollup" package. + DailyRollupsInverseTable = "channel_monitor_daily_rollups" + // DailyRollupsColumn is the table column denoting the daily_rollups relation/edge. + DailyRollupsColumn = "monitor_id" ) // Columns holds all SQL columns for channelmonitor fields. @@ -214,6 +223,20 @@ func ByHistory(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { sqlgraph.OrderByNeighborTerms(s, newHistoryStep(), append([]sql.OrderTerm{term}, terms...)...) } } + +// ByDailyRollupsCount orders the results by daily_rollups count. +func ByDailyRollupsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newDailyRollupsStep(), opts...) + } +} + +// ByDailyRollups orders the results by daily_rollups terms. +func ByDailyRollups(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newDailyRollupsStep(), append([]sql.OrderTerm{term}, terms...)...) 
+ } +} func newHistoryStep() *sqlgraph.Step { return sqlgraph.NewStep( sqlgraph.From(Table, FieldID), @@ -221,3 +244,10 @@ func newHistoryStep() *sqlgraph.Step { sqlgraph.Edge(sqlgraph.O2M, false, HistoryTable, HistoryColumn), ) } +func newDailyRollupsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(DailyRollupsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, DailyRollupsTable, DailyRollupsColumn), + ) +} diff --git a/backend/ent/channelmonitor/where.go b/backend/ent/channelmonitor/where.go index 8126fb77..abb8484d 100644 --- a/backend/ent/channelmonitor/where.go +++ b/backend/ent/channelmonitor/where.go @@ -708,6 +708,29 @@ func HasHistoryWith(preds ...predicate.ChannelMonitorHistory) predicate.ChannelM }) } +// HasDailyRollups applies the HasEdge predicate on the "daily_rollups" edge. +func HasDailyRollups() predicate.ChannelMonitor { + return predicate.ChannelMonitor(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, DailyRollupsTable, DailyRollupsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasDailyRollupsWith applies the HasEdge predicate on the "daily_rollups" edge with a given conditions (other predicates). +func HasDailyRollupsWith(preds ...predicate.ChannelMonitorDailyRollup) predicate.ChannelMonitor { + return predicate.ChannelMonitor(func(s *sql.Selector) { + step := newDailyRollupsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + // And groups predicates with the AND operator between them. 
func And(predicates ...predicate.ChannelMonitor) predicate.ChannelMonitor { return predicate.ChannelMonitor(sql.AndPredicates(predicates...)) diff --git a/backend/ent/channelmonitor_create.go b/backend/ent/channelmonitor_create.go index ad735f3e..30a7b40d 100644 --- a/backend/ent/channelmonitor_create.go +++ b/backend/ent/channelmonitor_create.go @@ -12,6 +12,7 @@ import ( "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" "github.com/Wei-Shaw/sub2api/ent/channelmonitor" + "github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup" "github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory" ) @@ -156,6 +157,21 @@ func (_c *ChannelMonitorCreate) AddHistory(v ...*ChannelMonitorHistory) *Channel return _c.AddHistoryIDs(ids...) } +// AddDailyRollupIDs adds the "daily_rollups" edge to the ChannelMonitorDailyRollup entity by IDs. +func (_c *ChannelMonitorCreate) AddDailyRollupIDs(ids ...int64) *ChannelMonitorCreate { + _c.mutation.AddDailyRollupIDs(ids...) + return _c +} + +// AddDailyRollups adds the "daily_rollups" edges to the ChannelMonitorDailyRollup entity. +func (_c *ChannelMonitorCreate) AddDailyRollups(v ...*ChannelMonitorDailyRollup) *ChannelMonitorCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddDailyRollupIDs(ids...) +} + // Mutation returns the ChannelMonitorMutation object of the builder. 
func (_c *ChannelMonitorCreate) Mutation() *ChannelMonitorMutation { return _c.mutation @@ -378,6 +394,22 @@ func (_c *ChannelMonitorCreate) createSpec() (*ChannelMonitor, *sqlgraph.CreateS } _spec.Edges = append(_spec.Edges, edge) } + if nodes := _c.mutation.DailyRollupsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: channelmonitor.DailyRollupsTable, + Columns: []string{channelmonitor.DailyRollupsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(channelmonitordailyrollup.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } return _node, _spec } diff --git a/backend/ent/channelmonitor_query.go b/backend/ent/channelmonitor_query.go index 6a532587..2ebd95bb 100644 --- a/backend/ent/channelmonitor_query.go +++ b/backend/ent/channelmonitor_query.go @@ -14,6 +14,7 @@ import ( "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" "github.com/Wei-Shaw/sub2api/ent/channelmonitor" + "github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup" "github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory" "github.com/Wei-Shaw/sub2api/ent/predicate" ) @@ -21,12 +22,13 @@ import ( // ChannelMonitorQuery is the builder for querying ChannelMonitor entities. type ChannelMonitorQuery struct { config - ctx *QueryContext - order []channelmonitor.OrderOption - inters []Interceptor - predicates []predicate.ChannelMonitor - withHistory *ChannelMonitorHistoryQuery - modifiers []func(*sql.Selector) + ctx *QueryContext + order []channelmonitor.OrderOption + inters []Interceptor + predicates []predicate.ChannelMonitor + withHistory *ChannelMonitorHistoryQuery + withDailyRollups *ChannelMonitorDailyRollupQuery + modifiers []func(*sql.Selector) // intermediate query (i.e. traversal path). 
sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -85,6 +87,28 @@ func (_q *ChannelMonitorQuery) QueryHistory() *ChannelMonitorHistoryQuery { return query } +// QueryDailyRollups chains the current query on the "daily_rollups" edge. +func (_q *ChannelMonitorQuery) QueryDailyRollups() *ChannelMonitorDailyRollupQuery { + query := (&ChannelMonitorDailyRollupClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(channelmonitor.Table, channelmonitor.FieldID, selector), + sqlgraph.To(channelmonitordailyrollup.Table, channelmonitordailyrollup.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, channelmonitor.DailyRollupsTable, channelmonitor.DailyRollupsColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + // First returns the first ChannelMonitor entity from the query. // Returns a *NotFoundError when no ChannelMonitor was found. func (_q *ChannelMonitorQuery) First(ctx context.Context) (*ChannelMonitor, error) { @@ -272,12 +296,13 @@ func (_q *ChannelMonitorQuery) Clone() *ChannelMonitorQuery { return nil } return &ChannelMonitorQuery{ - config: _q.config, - ctx: _q.ctx.Clone(), - order: append([]channelmonitor.OrderOption{}, _q.order...), - inters: append([]Interceptor{}, _q.inters...), - predicates: append([]predicate.ChannelMonitor{}, _q.predicates...), - withHistory: _q.withHistory.Clone(), + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]channelmonitor.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.ChannelMonitor{}, _q.predicates...), + withHistory: _q.withHistory.Clone(), + withDailyRollups: _q.withDailyRollups.Clone(), // clone intermediate query. 
sql: _q.sql.Clone(), path: _q.path, @@ -295,6 +320,17 @@ func (_q *ChannelMonitorQuery) WithHistory(opts ...func(*ChannelMonitorHistoryQu return _q } +// WithDailyRollups tells the query-builder to eager-load the nodes that are connected to +// the "daily_rollups" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *ChannelMonitorQuery) WithDailyRollups(opts ...func(*ChannelMonitorDailyRollupQuery)) *ChannelMonitorQuery { + query := (&ChannelMonitorDailyRollupClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withDailyRollups = query + return _q +} + // GroupBy is used to group vertices by one or more fields/columns. // It is often used with aggregate functions, like: count, max, mean, min, sum. // @@ -373,8 +409,9 @@ func (_q *ChannelMonitorQuery) sqlAll(ctx context.Context, hooks ...queryHook) ( var ( nodes = []*ChannelMonitor{} _spec = _q.querySpec() - loadedTypes = [1]bool{ + loadedTypes = [2]bool{ _q.withHistory != nil, + _q.withDailyRollups != nil, } ) _spec.ScanValues = func(columns []string) ([]any, error) { @@ -405,6 +442,15 @@ func (_q *ChannelMonitorQuery) sqlAll(ctx context.Context, hooks ...queryHook) ( return nil, err } } + if query := _q.withDailyRollups; query != nil { + if err := _q.loadDailyRollups(ctx, query, nodes, + func(n *ChannelMonitor) { n.Edges.DailyRollups = []*ChannelMonitorDailyRollup{} }, + func(n *ChannelMonitor, e *ChannelMonitorDailyRollup) { + n.Edges.DailyRollups = append(n.Edges.DailyRollups, e) + }); err != nil { + return nil, err + } + } return nodes, nil } @@ -438,6 +484,36 @@ func (_q *ChannelMonitorQuery) loadHistory(ctx context.Context, query *ChannelMo } return nil } +func (_q *ChannelMonitorQuery) loadDailyRollups(ctx context.Context, query *ChannelMonitorDailyRollupQuery, nodes []*ChannelMonitor, init func(*ChannelMonitor), assign func(*ChannelMonitor, *ChannelMonitorDailyRollup)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := 
make(map[int64]*ChannelMonitor) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(channelmonitordailyrollup.FieldMonitorID) + } + query.Where(predicate.ChannelMonitorDailyRollup(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(channelmonitor.DailyRollupsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.MonitorID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "monitor_id" returned %v for node %v`, fk, n.ID) + } + assign(node, n) + } + return nil +} func (_q *ChannelMonitorQuery) sqlCount(ctx context.Context) (int, error) { _spec := _q.querySpec() diff --git a/backend/ent/channelmonitor_update.go b/backend/ent/channelmonitor_update.go index df575a9f..7ba4e449 100644 --- a/backend/ent/channelmonitor_update.go +++ b/backend/ent/channelmonitor_update.go @@ -13,6 +13,7 @@ import ( "entgo.io/ent/dialect/sql/sqljson" "entgo.io/ent/schema/field" "github.com/Wei-Shaw/sub2api/ent/channelmonitor" + "github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup" "github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory" "github.com/Wei-Shaw/sub2api/ent/predicate" ) @@ -229,6 +230,21 @@ func (_u *ChannelMonitorUpdate) AddHistory(v ...*ChannelMonitorHistory) *Channel return _u.AddHistoryIDs(ids...) } +// AddDailyRollupIDs adds the "daily_rollups" edge to the ChannelMonitorDailyRollup entity by IDs. +func (_u *ChannelMonitorUpdate) AddDailyRollupIDs(ids ...int64) *ChannelMonitorUpdate { + _u.mutation.AddDailyRollupIDs(ids...) + return _u +} + +// AddDailyRollups adds the "daily_rollups" edges to the ChannelMonitorDailyRollup entity. 
+func (_u *ChannelMonitorUpdate) AddDailyRollups(v ...*ChannelMonitorDailyRollup) *ChannelMonitorUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddDailyRollupIDs(ids...) +} + // Mutation returns the ChannelMonitorMutation object of the builder. func (_u *ChannelMonitorUpdate) Mutation() *ChannelMonitorMutation { return _u.mutation @@ -255,6 +271,27 @@ func (_u *ChannelMonitorUpdate) RemoveHistory(v ...*ChannelMonitorHistory) *Chan return _u.RemoveHistoryIDs(ids...) } +// ClearDailyRollups clears all "daily_rollups" edges to the ChannelMonitorDailyRollup entity. +func (_u *ChannelMonitorUpdate) ClearDailyRollups() *ChannelMonitorUpdate { + _u.mutation.ClearDailyRollups() + return _u +} + +// RemoveDailyRollupIDs removes the "daily_rollups" edge to ChannelMonitorDailyRollup entities by IDs. +func (_u *ChannelMonitorUpdate) RemoveDailyRollupIDs(ids ...int64) *ChannelMonitorUpdate { + _u.mutation.RemoveDailyRollupIDs(ids...) + return _u +} + +// RemoveDailyRollups removes "daily_rollups" edges to ChannelMonitorDailyRollup entities. +func (_u *ChannelMonitorUpdate) RemoveDailyRollups(v ...*ChannelMonitorDailyRollup) *ChannelMonitorUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveDailyRollupIDs(ids...) +} + // Save executes the query and returns the number of nodes affected by the update operation. 
func (_u *ChannelMonitorUpdate) Save(ctx context.Context) (int, error) { _u.defaults() @@ -441,6 +478,51 @@ func (_u *ChannelMonitorUpdate) sqlSave(ctx context.Context) (_node int, err err } _spec.Edges.Add = append(_spec.Edges.Add, edge) } + if _u.mutation.DailyRollupsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: channelmonitor.DailyRollupsTable, + Columns: []string{channelmonitor.DailyRollupsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(channelmonitordailyrollup.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedDailyRollupsIDs(); len(nodes) > 0 && !_u.mutation.DailyRollupsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: channelmonitor.DailyRollupsTable, + Columns: []string{channelmonitor.DailyRollupsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(channelmonitordailyrollup.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.DailyRollupsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: channelmonitor.DailyRollupsTable, + Columns: []string{channelmonitor.DailyRollupsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(channelmonitordailyrollup.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{channelmonitor.Label} @@ -660,6 +742,21 @@ func (_u *ChannelMonitorUpdateOne) AddHistory(v ...*ChannelMonitorHistory) *Chan return 
_u.AddHistoryIDs(ids...) } +// AddDailyRollupIDs adds the "daily_rollups" edge to the ChannelMonitorDailyRollup entity by IDs. +func (_u *ChannelMonitorUpdateOne) AddDailyRollupIDs(ids ...int64) *ChannelMonitorUpdateOne { + _u.mutation.AddDailyRollupIDs(ids...) + return _u +} + +// AddDailyRollups adds the "daily_rollups" edges to the ChannelMonitorDailyRollup entity. +func (_u *ChannelMonitorUpdateOne) AddDailyRollups(v ...*ChannelMonitorDailyRollup) *ChannelMonitorUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddDailyRollupIDs(ids...) +} + // Mutation returns the ChannelMonitorMutation object of the builder. func (_u *ChannelMonitorUpdateOne) Mutation() *ChannelMonitorMutation { return _u.mutation @@ -686,6 +783,27 @@ func (_u *ChannelMonitorUpdateOne) RemoveHistory(v ...*ChannelMonitorHistory) *C return _u.RemoveHistoryIDs(ids...) } +// ClearDailyRollups clears all "daily_rollups" edges to the ChannelMonitorDailyRollup entity. +func (_u *ChannelMonitorUpdateOne) ClearDailyRollups() *ChannelMonitorUpdateOne { + _u.mutation.ClearDailyRollups() + return _u +} + +// RemoveDailyRollupIDs removes the "daily_rollups" edge to ChannelMonitorDailyRollup entities by IDs. +func (_u *ChannelMonitorUpdateOne) RemoveDailyRollupIDs(ids ...int64) *ChannelMonitorUpdateOne { + _u.mutation.RemoveDailyRollupIDs(ids...) + return _u +} + +// RemoveDailyRollups removes "daily_rollups" edges to ChannelMonitorDailyRollup entities. +func (_u *ChannelMonitorUpdateOne) RemoveDailyRollups(v ...*ChannelMonitorDailyRollup) *ChannelMonitorUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveDailyRollupIDs(ids...) +} + // Where appends a list predicates to the ChannelMonitorUpdate builder. func (_u *ChannelMonitorUpdateOne) Where(ps ...predicate.ChannelMonitor) *ChannelMonitorUpdateOne { _u.mutation.Where(ps...) 
@@ -902,6 +1020,51 @@ func (_u *ChannelMonitorUpdateOne) sqlSave(ctx context.Context) (_node *ChannelM } _spec.Edges.Add = append(_spec.Edges.Add, edge) } + if _u.mutation.DailyRollupsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: channelmonitor.DailyRollupsTable, + Columns: []string{channelmonitor.DailyRollupsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(channelmonitordailyrollup.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedDailyRollupsIDs(); len(nodes) > 0 && !_u.mutation.DailyRollupsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: channelmonitor.DailyRollupsTable, + Columns: []string{channelmonitor.DailyRollupsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(channelmonitordailyrollup.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.DailyRollupsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: channelmonitor.DailyRollupsTable, + Columns: []string{channelmonitor.DailyRollupsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(channelmonitordailyrollup.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } _node = &ChannelMonitor{config: _u.config} _spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues diff --git a/backend/ent/channelmonitordailyrollup.go b/backend/ent/channelmonitordailyrollup.go new file mode 100644 index 00000000..6c7a8afa --- /dev/null +++ b/backend/ent/channelmonitordailyrollup.go @@ -0,0 +1,292 @@ +// Code generated by ent, DO NOT 
EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/channelmonitor" + "github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup" +) + +// ChannelMonitorDailyRollup is the model entity for the ChannelMonitorDailyRollup schema. +type ChannelMonitorDailyRollup struct { + config `json:"-"` + // ID of the ent. + ID int64 `json:"id,omitempty"` + // DeletedAt holds the value of the "deleted_at" field. + DeletedAt *time.Time `json:"deleted_at,omitempty"` + // MonitorID holds the value of the "monitor_id" field. + MonitorID int64 `json:"monitor_id,omitempty"` + // Model holds the value of the "model" field. + Model string `json:"model,omitempty"` + // BucketDate holds the value of the "bucket_date" field. + BucketDate time.Time `json:"bucket_date,omitempty"` + // TotalChecks holds the value of the "total_checks" field. + TotalChecks int `json:"total_checks,omitempty"` + // OkCount holds the value of the "ok_count" field. + OkCount int `json:"ok_count,omitempty"` + // OperationalCount holds the value of the "operational_count" field. + OperationalCount int `json:"operational_count,omitempty"` + // DegradedCount holds the value of the "degraded_count" field. + DegradedCount int `json:"degraded_count,omitempty"` + // FailedCount holds the value of the "failed_count" field. + FailedCount int `json:"failed_count,omitempty"` + // ErrorCount holds the value of the "error_count" field. + ErrorCount int `json:"error_count,omitempty"` + // SumLatencyMs holds the value of the "sum_latency_ms" field. + SumLatencyMs int64 `json:"sum_latency_ms,omitempty"` + // CountLatency holds the value of the "count_latency" field. + CountLatency int `json:"count_latency,omitempty"` + // SumPingLatencyMs holds the value of the "sum_ping_latency_ms" field. + SumPingLatencyMs int64 `json:"sum_ping_latency_ms,omitempty"` + // CountPingLatency holds the value of the "count_ping_latency" field. 
+ CountPingLatency int `json:"count_ping_latency,omitempty"` + // ComputedAt holds the value of the "computed_at" field. + ComputedAt time.Time `json:"computed_at,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the ChannelMonitorDailyRollupQuery when eager-loading is set. + Edges ChannelMonitorDailyRollupEdges `json:"edges"` + selectValues sql.SelectValues +} + +// ChannelMonitorDailyRollupEdges holds the relations/edges for other nodes in the graph. +type ChannelMonitorDailyRollupEdges struct { + // Monitor holds the value of the monitor edge. + Monitor *ChannelMonitor `json:"monitor,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// MonitorOrErr returns the Monitor value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e ChannelMonitorDailyRollupEdges) MonitorOrErr() (*ChannelMonitor, error) { + if e.Monitor != nil { + return e.Monitor, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: channelmonitor.Label} + } + return nil, &NotLoadedError{edge: "monitor"} +} + +// scanValues returns the types for scanning values from sql.Rows. 
+func (*ChannelMonitorDailyRollup) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case channelmonitordailyrollup.FieldID, channelmonitordailyrollup.FieldMonitorID, channelmonitordailyrollup.FieldTotalChecks, channelmonitordailyrollup.FieldOkCount, channelmonitordailyrollup.FieldOperationalCount, channelmonitordailyrollup.FieldDegradedCount, channelmonitordailyrollup.FieldFailedCount, channelmonitordailyrollup.FieldErrorCount, channelmonitordailyrollup.FieldSumLatencyMs, channelmonitordailyrollup.FieldCountLatency, channelmonitordailyrollup.FieldSumPingLatencyMs, channelmonitordailyrollup.FieldCountPingLatency: + values[i] = new(sql.NullInt64) + case channelmonitordailyrollup.FieldModel: + values[i] = new(sql.NullString) + case channelmonitordailyrollup.FieldDeletedAt, channelmonitordailyrollup.FieldBucketDate, channelmonitordailyrollup.FieldComputedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the ChannelMonitorDailyRollup fields. 
+func (_m *ChannelMonitorDailyRollup) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case channelmonitordailyrollup.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int64(value.Int64) + case channelmonitordailyrollup.FieldDeletedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field deleted_at", values[i]) + } else if value.Valid { + _m.DeletedAt = new(time.Time) + *_m.DeletedAt = value.Time + } + case channelmonitordailyrollup.FieldMonitorID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field monitor_id", values[i]) + } else if value.Valid { + _m.MonitorID = value.Int64 + } + case channelmonitordailyrollup.FieldModel: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field model", values[i]) + } else if value.Valid { + _m.Model = value.String + } + case channelmonitordailyrollup.FieldBucketDate: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field bucket_date", values[i]) + } else if value.Valid { + _m.BucketDate = value.Time + } + case channelmonitordailyrollup.FieldTotalChecks: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field total_checks", values[i]) + } else if value.Valid { + _m.TotalChecks = int(value.Int64) + } + case channelmonitordailyrollup.FieldOkCount: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field ok_count", values[i]) + } else if value.Valid { + _m.OkCount = int(value.Int64) + } + case channelmonitordailyrollup.FieldOperationalCount: + if value, ok := values[i].(*sql.NullInt64); !ok { + 
return fmt.Errorf("unexpected type %T for field operational_count", values[i]) + } else if value.Valid { + _m.OperationalCount = int(value.Int64) + } + case channelmonitordailyrollup.FieldDegradedCount: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field degraded_count", values[i]) + } else if value.Valid { + _m.DegradedCount = int(value.Int64) + } + case channelmonitordailyrollup.FieldFailedCount: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field failed_count", values[i]) + } else if value.Valid { + _m.FailedCount = int(value.Int64) + } + case channelmonitordailyrollup.FieldErrorCount: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field error_count", values[i]) + } else if value.Valid { + _m.ErrorCount = int(value.Int64) + } + case channelmonitordailyrollup.FieldSumLatencyMs: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field sum_latency_ms", values[i]) + } else if value.Valid { + _m.SumLatencyMs = value.Int64 + } + case channelmonitordailyrollup.FieldCountLatency: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field count_latency", values[i]) + } else if value.Valid { + _m.CountLatency = int(value.Int64) + } + case channelmonitordailyrollup.FieldSumPingLatencyMs: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field sum_ping_latency_ms", values[i]) + } else if value.Valid { + _m.SumPingLatencyMs = value.Int64 + } + case channelmonitordailyrollup.FieldCountPingLatency: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field count_ping_latency", values[i]) + } else if value.Valid { + _m.CountPingLatency = int(value.Int64) + } + case channelmonitordailyrollup.FieldComputedAt: + if value, ok := values[i].(*sql.NullTime); !ok { 
+ return fmt.Errorf("unexpected type %T for field computed_at", values[i]) + } else if value.Valid { + _m.ComputedAt = value.Time + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the ChannelMonitorDailyRollup. +// This includes values selected through modifiers, order, etc. +func (_m *ChannelMonitorDailyRollup) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// QueryMonitor queries the "monitor" edge of the ChannelMonitorDailyRollup entity. +func (_m *ChannelMonitorDailyRollup) QueryMonitor() *ChannelMonitorQuery { + return NewChannelMonitorDailyRollupClient(_m.config).QueryMonitor(_m) +} + +// Update returns a builder for updating this ChannelMonitorDailyRollup. +// Note that you need to call ChannelMonitorDailyRollup.Unwrap() before calling this method if this ChannelMonitorDailyRollup +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *ChannelMonitorDailyRollup) Update() *ChannelMonitorDailyRollupUpdateOne { + return NewChannelMonitorDailyRollupClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the ChannelMonitorDailyRollup entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *ChannelMonitorDailyRollup) Unwrap() *ChannelMonitorDailyRollup { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: ChannelMonitorDailyRollup is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. 
+func (_m *ChannelMonitorDailyRollup) String() string { + var builder strings.Builder + builder.WriteString("ChannelMonitorDailyRollup(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + if v := _m.DeletedAt; v != nil { + builder.WriteString("deleted_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + builder.WriteString("monitor_id=") + builder.WriteString(fmt.Sprintf("%v", _m.MonitorID)) + builder.WriteString(", ") + builder.WriteString("model=") + builder.WriteString(_m.Model) + builder.WriteString(", ") + builder.WriteString("bucket_date=") + builder.WriteString(_m.BucketDate.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("total_checks=") + builder.WriteString(fmt.Sprintf("%v", _m.TotalChecks)) + builder.WriteString(", ") + builder.WriteString("ok_count=") + builder.WriteString(fmt.Sprintf("%v", _m.OkCount)) + builder.WriteString(", ") + builder.WriteString("operational_count=") + builder.WriteString(fmt.Sprintf("%v", _m.OperationalCount)) + builder.WriteString(", ") + builder.WriteString("degraded_count=") + builder.WriteString(fmt.Sprintf("%v", _m.DegradedCount)) + builder.WriteString(", ") + builder.WriteString("failed_count=") + builder.WriteString(fmt.Sprintf("%v", _m.FailedCount)) + builder.WriteString(", ") + builder.WriteString("error_count=") + builder.WriteString(fmt.Sprintf("%v", _m.ErrorCount)) + builder.WriteString(", ") + builder.WriteString("sum_latency_ms=") + builder.WriteString(fmt.Sprintf("%v", _m.SumLatencyMs)) + builder.WriteString(", ") + builder.WriteString("count_latency=") + builder.WriteString(fmt.Sprintf("%v", _m.CountLatency)) + builder.WriteString(", ") + builder.WriteString("sum_ping_latency_ms=") + builder.WriteString(fmt.Sprintf("%v", _m.SumPingLatencyMs)) + builder.WriteString(", ") + builder.WriteString("count_ping_latency=") + builder.WriteString(fmt.Sprintf("%v", _m.CountPingLatency)) + builder.WriteString(", ") + builder.WriteString("computed_at=") + 
builder.WriteString(_m.ComputedAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// ChannelMonitorDailyRollups is a parsable slice of ChannelMonitorDailyRollup. +type ChannelMonitorDailyRollups []*ChannelMonitorDailyRollup diff --git a/backend/ent/channelmonitordailyrollup/channelmonitordailyrollup.go b/backend/ent/channelmonitordailyrollup/channelmonitordailyrollup.go new file mode 100644 index 00000000..eb1f69a8 --- /dev/null +++ b/backend/ent/channelmonitordailyrollup/channelmonitordailyrollup.go @@ -0,0 +1,222 @@ +// Code generated by ent, DO NOT EDIT. + +package channelmonitordailyrollup + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the channelmonitordailyrollup type in the database. + Label = "channel_monitor_daily_rollup" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldDeletedAt holds the string denoting the deleted_at field in the database. + FieldDeletedAt = "deleted_at" + // FieldMonitorID holds the string denoting the monitor_id field in the database. + FieldMonitorID = "monitor_id" + // FieldModel holds the string denoting the model field in the database. + FieldModel = "model" + // FieldBucketDate holds the string denoting the bucket_date field in the database. + FieldBucketDate = "bucket_date" + // FieldTotalChecks holds the string denoting the total_checks field in the database. + FieldTotalChecks = "total_checks" + // FieldOkCount holds the string denoting the ok_count field in the database. + FieldOkCount = "ok_count" + // FieldOperationalCount holds the string denoting the operational_count field in the database. + FieldOperationalCount = "operational_count" + // FieldDegradedCount holds the string denoting the degraded_count field in the database. 
+ FieldDegradedCount = "degraded_count" + // FieldFailedCount holds the string denoting the failed_count field in the database. + FieldFailedCount = "failed_count" + // FieldErrorCount holds the string denoting the error_count field in the database. + FieldErrorCount = "error_count" + // FieldSumLatencyMs holds the string denoting the sum_latency_ms field in the database. + FieldSumLatencyMs = "sum_latency_ms" + // FieldCountLatency holds the string denoting the count_latency field in the database. + FieldCountLatency = "count_latency" + // FieldSumPingLatencyMs holds the string denoting the sum_ping_latency_ms field in the database. + FieldSumPingLatencyMs = "sum_ping_latency_ms" + // FieldCountPingLatency holds the string denoting the count_ping_latency field in the database. + FieldCountPingLatency = "count_ping_latency" + // FieldComputedAt holds the string denoting the computed_at field in the database. + FieldComputedAt = "computed_at" + // EdgeMonitor holds the string denoting the monitor edge name in mutations. + EdgeMonitor = "monitor" + // Table holds the table name of the channelmonitordailyrollup in the database. + Table = "channel_monitor_daily_rollups" + // MonitorTable is the table that holds the monitor relation/edge. + MonitorTable = "channel_monitor_daily_rollups" + // MonitorInverseTable is the table name for the ChannelMonitor entity. + // It exists in this package in order to avoid circular dependency with the "channelmonitor" package. + MonitorInverseTable = "channel_monitors" + // MonitorColumn is the table column denoting the monitor relation/edge. + MonitorColumn = "monitor_id" +) + +// Columns holds all SQL columns for channelmonitordailyrollup fields. 
+var Columns = []string{ + FieldID, + FieldDeletedAt, + FieldMonitorID, + FieldModel, + FieldBucketDate, + FieldTotalChecks, + FieldOkCount, + FieldOperationalCount, + FieldDegradedCount, + FieldFailedCount, + FieldErrorCount, + FieldSumLatencyMs, + FieldCountLatency, + FieldSumPingLatencyMs, + FieldCountPingLatency, + FieldComputedAt, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +// Note that the variables below are initialized by the runtime +// package on the initialization of the application. Therefore, +// it should be imported in the main as follows: +// +// import _ "github.com/Wei-Shaw/sub2api/ent/runtime" +var ( + Hooks [1]ent.Hook + Interceptors [1]ent.Interceptor + // ModelValidator is a validator for the "model" field. It is called by the builders before save. + ModelValidator func(string) error + // DefaultTotalChecks holds the default value on creation for the "total_checks" field. + DefaultTotalChecks int + // DefaultOkCount holds the default value on creation for the "ok_count" field. + DefaultOkCount int + // DefaultOperationalCount holds the default value on creation for the "operational_count" field. + DefaultOperationalCount int + // DefaultDegradedCount holds the default value on creation for the "degraded_count" field. + DefaultDegradedCount int + // DefaultFailedCount holds the default value on creation for the "failed_count" field. + DefaultFailedCount int + // DefaultErrorCount holds the default value on creation for the "error_count" field. + DefaultErrorCount int + // DefaultSumLatencyMs holds the default value on creation for the "sum_latency_ms" field. + DefaultSumLatencyMs int64 + // DefaultCountLatency holds the default value on creation for the "count_latency" field. 
+ DefaultCountLatency int + // DefaultSumPingLatencyMs holds the default value on creation for the "sum_ping_latency_ms" field. + DefaultSumPingLatencyMs int64 + // DefaultCountPingLatency holds the default value on creation for the "count_ping_latency" field. + DefaultCountPingLatency int + // DefaultComputedAt holds the default value on creation for the "computed_at" field. + DefaultComputedAt func() time.Time + // UpdateDefaultComputedAt holds the default value on update for the "computed_at" field. + UpdateDefaultComputedAt func() time.Time +) + +// OrderOption defines the ordering options for the ChannelMonitorDailyRollup queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByDeletedAt orders the results by the deleted_at field. +func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDeletedAt, opts...).ToFunc() +} + +// ByMonitorID orders the results by the monitor_id field. +func ByMonitorID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldMonitorID, opts...).ToFunc() +} + +// ByModel orders the results by the model field. +func ByModel(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldModel, opts...).ToFunc() +} + +// ByBucketDate orders the results by the bucket_date field. +func ByBucketDate(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldBucketDate, opts...).ToFunc() +} + +// ByTotalChecks orders the results by the total_checks field. +func ByTotalChecks(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldTotalChecks, opts...).ToFunc() +} + +// ByOkCount orders the results by the ok_count field. 
+func ByOkCount(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldOkCount, opts...).ToFunc() +} + +// ByOperationalCount orders the results by the operational_count field. +func ByOperationalCount(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldOperationalCount, opts...).ToFunc() +} + +// ByDegradedCount orders the results by the degraded_count field. +func ByDegradedCount(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDegradedCount, opts...).ToFunc() +} + +// ByFailedCount orders the results by the failed_count field. +func ByFailedCount(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldFailedCount, opts...).ToFunc() +} + +// ByErrorCount orders the results by the error_count field. +func ByErrorCount(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldErrorCount, opts...).ToFunc() +} + +// BySumLatencyMs orders the results by the sum_latency_ms field. +func BySumLatencyMs(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSumLatencyMs, opts...).ToFunc() +} + +// ByCountLatency orders the results by the count_latency field. +func ByCountLatency(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCountLatency, opts...).ToFunc() +} + +// BySumPingLatencyMs orders the results by the sum_ping_latency_ms field. +func BySumPingLatencyMs(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSumPingLatencyMs, opts...).ToFunc() +} + +// ByCountPingLatency orders the results by the count_ping_latency field. +func ByCountPingLatency(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCountPingLatency, opts...).ToFunc() +} + +// ByComputedAt orders the results by the computed_at field. +func ByComputedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldComputedAt, opts...).ToFunc() +} + +// ByMonitorField orders the results by monitor field. 
+func ByMonitorField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newMonitorStep(), sql.OrderByField(field, opts...)) + } +} +func newMonitorStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(MonitorInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, MonitorTable, MonitorColumn), + ) +} diff --git a/backend/ent/channelmonitordailyrollup/where.go b/backend/ent/channelmonitordailyrollup/where.go new file mode 100644 index 00000000..9da8d4be --- /dev/null +++ b/backend/ent/channelmonitordailyrollup/where.go @@ -0,0 +1,784 @@ +// Code generated by ent, DO NOT EDIT. + +package channelmonitordailyrollup + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int64) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int64) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int64) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int64) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. 
+func IDGT(id int64) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int64) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int64) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int64) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldID, id)) +} + +// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ. +func DeletedAt(v time.Time) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldDeletedAt, v)) +} + +// MonitorID applies equality check predicate on the "monitor_id" field. It's identical to MonitorIDEQ. +func MonitorID(v int64) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldMonitorID, v)) +} + +// Model applies equality check predicate on the "model" field. It's identical to ModelEQ. +func Model(v string) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldModel, v)) +} + +// BucketDate applies equality check predicate on the "bucket_date" field. It's identical to BucketDateEQ. +func BucketDate(v time.Time) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldBucketDate, v)) +} + +// TotalChecks applies equality check predicate on the "total_checks" field. It's identical to TotalChecksEQ. 
+func TotalChecks(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldTotalChecks, v)) +} + +// OkCount applies equality check predicate on the "ok_count" field. It's identical to OkCountEQ. +func OkCount(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldOkCount, v)) +} + +// OperationalCount applies equality check predicate on the "operational_count" field. It's identical to OperationalCountEQ. +func OperationalCount(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldOperationalCount, v)) +} + +// DegradedCount applies equality check predicate on the "degraded_count" field. It's identical to DegradedCountEQ. +func DegradedCount(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldDegradedCount, v)) +} + +// FailedCount applies equality check predicate on the "failed_count" field. It's identical to FailedCountEQ. +func FailedCount(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldFailedCount, v)) +} + +// ErrorCount applies equality check predicate on the "error_count" field. It's identical to ErrorCountEQ. +func ErrorCount(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldErrorCount, v)) +} + +// SumLatencyMs applies equality check predicate on the "sum_latency_ms" field. It's identical to SumLatencyMsEQ. +func SumLatencyMs(v int64) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldSumLatencyMs, v)) +} + +// CountLatency applies equality check predicate on the "count_latency" field. It's identical to CountLatencyEQ. 
+func CountLatency(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldCountLatency, v)) +} + +// SumPingLatencyMs applies equality check predicate on the "sum_ping_latency_ms" field. It's identical to SumPingLatencyMsEQ. +func SumPingLatencyMs(v int64) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldSumPingLatencyMs, v)) +} + +// CountPingLatency applies equality check predicate on the "count_ping_latency" field. It's identical to CountPingLatencyEQ. +func CountPingLatency(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldCountPingLatency, v)) +} + +// ComputedAt applies equality check predicate on the "computed_at" field. It's identical to ComputedAtEQ. +func ComputedAt(v time.Time) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldComputedAt, v)) +} + +// DeletedAtEQ applies the EQ predicate on the "deleted_at" field. +func DeletedAtEQ(v time.Time) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldDeletedAt, v)) +} + +// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field. +func DeletedAtNEQ(v time.Time) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldDeletedAt, v)) +} + +// DeletedAtIn applies the In predicate on the "deleted_at" field. +func DeletedAtIn(vs ...time.Time) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldDeletedAt, vs...)) +} + +// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field. +func DeletedAtNotIn(vs ...time.Time) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldDeletedAt, vs...)) +} + +// DeletedAtGT applies the GT predicate on the "deleted_at" field. 
+func DeletedAtGT(v time.Time) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldDeletedAt, v)) +} + +// DeletedAtGTE applies the GTE predicate on the "deleted_at" field. +func DeletedAtGTE(v time.Time) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldDeletedAt, v)) +} + +// DeletedAtLT applies the LT predicate on the "deleted_at" field. +func DeletedAtLT(v time.Time) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldDeletedAt, v)) +} + +// DeletedAtLTE applies the LTE predicate on the "deleted_at" field. +func DeletedAtLTE(v time.Time) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldDeletedAt, v)) +} + +// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field. +func DeletedAtIsNil() predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldIsNull(FieldDeletedAt)) +} + +// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field. +func DeletedAtNotNil() predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNotNull(FieldDeletedAt)) +} + +// MonitorIDEQ applies the EQ predicate on the "monitor_id" field. +func MonitorIDEQ(v int64) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldMonitorID, v)) +} + +// MonitorIDNEQ applies the NEQ predicate on the "monitor_id" field. +func MonitorIDNEQ(v int64) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldMonitorID, v)) +} + +// MonitorIDIn applies the In predicate on the "monitor_id" field. +func MonitorIDIn(vs ...int64) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldMonitorID, vs...)) +} + +// MonitorIDNotIn applies the NotIn predicate on the "monitor_id" field. 
+func MonitorIDNotIn(vs ...int64) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldMonitorID, vs...)) +} + +// ModelEQ applies the EQ predicate on the "model" field. +func ModelEQ(v string) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldModel, v)) +} + +// ModelNEQ applies the NEQ predicate on the "model" field. +func ModelNEQ(v string) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldModel, v)) +} + +// ModelIn applies the In predicate on the "model" field. +func ModelIn(vs ...string) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldModel, vs...)) +} + +// ModelNotIn applies the NotIn predicate on the "model" field. +func ModelNotIn(vs ...string) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldModel, vs...)) +} + +// ModelGT applies the GT predicate on the "model" field. +func ModelGT(v string) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldModel, v)) +} + +// ModelGTE applies the GTE predicate on the "model" field. +func ModelGTE(v string) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldModel, v)) +} + +// ModelLT applies the LT predicate on the "model" field. +func ModelLT(v string) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldModel, v)) +} + +// ModelLTE applies the LTE predicate on the "model" field. +func ModelLTE(v string) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldModel, v)) +} + +// ModelContains applies the Contains predicate on the "model" field. 
+func ModelContains(v string) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldContains(FieldModel, v)) +} + +// ModelHasPrefix applies the HasPrefix predicate on the "model" field. +func ModelHasPrefix(v string) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldHasPrefix(FieldModel, v)) +} + +// ModelHasSuffix applies the HasSuffix predicate on the "model" field. +func ModelHasSuffix(v string) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldHasSuffix(FieldModel, v)) +} + +// ModelEqualFold applies the EqualFold predicate on the "model" field. +func ModelEqualFold(v string) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEqualFold(FieldModel, v)) +} + +// ModelContainsFold applies the ContainsFold predicate on the "model" field. +func ModelContainsFold(v string) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldContainsFold(FieldModel, v)) +} + +// BucketDateEQ applies the EQ predicate on the "bucket_date" field. +func BucketDateEQ(v time.Time) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldBucketDate, v)) +} + +// BucketDateNEQ applies the NEQ predicate on the "bucket_date" field. +func BucketDateNEQ(v time.Time) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldBucketDate, v)) +} + +// BucketDateIn applies the In predicate on the "bucket_date" field. +func BucketDateIn(vs ...time.Time) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldBucketDate, vs...)) +} + +// BucketDateNotIn applies the NotIn predicate on the "bucket_date" field. 
+func BucketDateNotIn(vs ...time.Time) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldBucketDate, vs...)) +} + +// BucketDateGT applies the GT predicate on the "bucket_date" field. +func BucketDateGT(v time.Time) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldBucketDate, v)) +} + +// BucketDateGTE applies the GTE predicate on the "bucket_date" field. +func BucketDateGTE(v time.Time) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldBucketDate, v)) +} + +// BucketDateLT applies the LT predicate on the "bucket_date" field. +func BucketDateLT(v time.Time) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldBucketDate, v)) +} + +// BucketDateLTE applies the LTE predicate on the "bucket_date" field. +func BucketDateLTE(v time.Time) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldBucketDate, v)) +} + +// TotalChecksEQ applies the EQ predicate on the "total_checks" field. +func TotalChecksEQ(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldTotalChecks, v)) +} + +// TotalChecksNEQ applies the NEQ predicate on the "total_checks" field. +func TotalChecksNEQ(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldTotalChecks, v)) +} + +// TotalChecksIn applies the In predicate on the "total_checks" field. +func TotalChecksIn(vs ...int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldTotalChecks, vs...)) +} + +// TotalChecksNotIn applies the NotIn predicate on the "total_checks" field. 
+func TotalChecksNotIn(vs ...int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldTotalChecks, vs...)) +} + +// TotalChecksGT applies the GT predicate on the "total_checks" field. +func TotalChecksGT(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldTotalChecks, v)) +} + +// TotalChecksGTE applies the GTE predicate on the "total_checks" field. +func TotalChecksGTE(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldTotalChecks, v)) +} + +// TotalChecksLT applies the LT predicate on the "total_checks" field. +func TotalChecksLT(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldTotalChecks, v)) +} + +// TotalChecksLTE applies the LTE predicate on the "total_checks" field. +func TotalChecksLTE(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldTotalChecks, v)) +} + +// OkCountEQ applies the EQ predicate on the "ok_count" field. +func OkCountEQ(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldOkCount, v)) +} + +// OkCountNEQ applies the NEQ predicate on the "ok_count" field. +func OkCountNEQ(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldOkCount, v)) +} + +// OkCountIn applies the In predicate on the "ok_count" field. +func OkCountIn(vs ...int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldOkCount, vs...)) +} + +// OkCountNotIn applies the NotIn predicate on the "ok_count" field. +func OkCountNotIn(vs ...int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldOkCount, vs...)) +} + +// OkCountGT applies the GT predicate on the "ok_count" field. 
+func OkCountGT(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldOkCount, v)) +} + +// OkCountGTE applies the GTE predicate on the "ok_count" field. +func OkCountGTE(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldOkCount, v)) +} + +// OkCountLT applies the LT predicate on the "ok_count" field. +func OkCountLT(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldOkCount, v)) +} + +// OkCountLTE applies the LTE predicate on the "ok_count" field. +func OkCountLTE(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldOkCount, v)) +} + +// OperationalCountEQ applies the EQ predicate on the "operational_count" field. +func OperationalCountEQ(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldOperationalCount, v)) +} + +// OperationalCountNEQ applies the NEQ predicate on the "operational_count" field. +func OperationalCountNEQ(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldOperationalCount, v)) +} + +// OperationalCountIn applies the In predicate on the "operational_count" field. +func OperationalCountIn(vs ...int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldOperationalCount, vs...)) +} + +// OperationalCountNotIn applies the NotIn predicate on the "operational_count" field. +func OperationalCountNotIn(vs ...int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldOperationalCount, vs...)) +} + +// OperationalCountGT applies the GT predicate on the "operational_count" field. 
+func OperationalCountGT(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldOperationalCount, v)) +} + +// OperationalCountGTE applies the GTE predicate on the "operational_count" field. +func OperationalCountGTE(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldOperationalCount, v)) +} + +// OperationalCountLT applies the LT predicate on the "operational_count" field. +func OperationalCountLT(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldOperationalCount, v)) +} + +// OperationalCountLTE applies the LTE predicate on the "operational_count" field. +func OperationalCountLTE(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldOperationalCount, v)) +} + +// DegradedCountEQ applies the EQ predicate on the "degraded_count" field. +func DegradedCountEQ(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldDegradedCount, v)) +} + +// DegradedCountNEQ applies the NEQ predicate on the "degraded_count" field. +func DegradedCountNEQ(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldDegradedCount, v)) +} + +// DegradedCountIn applies the In predicate on the "degraded_count" field. +func DegradedCountIn(vs ...int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldDegradedCount, vs...)) +} + +// DegradedCountNotIn applies the NotIn predicate on the "degraded_count" field. +func DegradedCountNotIn(vs ...int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldDegradedCount, vs...)) +} + +// DegradedCountGT applies the GT predicate on the "degraded_count" field. 
+func DegradedCountGT(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldDegradedCount, v)) +} + +// DegradedCountGTE applies the GTE predicate on the "degraded_count" field. +func DegradedCountGTE(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldDegradedCount, v)) +} + +// DegradedCountLT applies the LT predicate on the "degraded_count" field. +func DegradedCountLT(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldDegradedCount, v)) +} + +// DegradedCountLTE applies the LTE predicate on the "degraded_count" field. +func DegradedCountLTE(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldDegradedCount, v)) +} + +// FailedCountEQ applies the EQ predicate on the "failed_count" field. +func FailedCountEQ(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldFailedCount, v)) +} + +// FailedCountNEQ applies the NEQ predicate on the "failed_count" field. +func FailedCountNEQ(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldFailedCount, v)) +} + +// FailedCountIn applies the In predicate on the "failed_count" field. +func FailedCountIn(vs ...int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldFailedCount, vs...)) +} + +// FailedCountNotIn applies the NotIn predicate on the "failed_count" field. +func FailedCountNotIn(vs ...int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldFailedCount, vs...)) +} + +// FailedCountGT applies the GT predicate on the "failed_count" field. 
+func FailedCountGT(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldFailedCount, v)) +} + +// FailedCountGTE applies the GTE predicate on the "failed_count" field. +func FailedCountGTE(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldFailedCount, v)) +} + +// FailedCountLT applies the LT predicate on the "failed_count" field. +func FailedCountLT(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldFailedCount, v)) +} + +// FailedCountLTE applies the LTE predicate on the "failed_count" field. +func FailedCountLTE(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldFailedCount, v)) +} + +// ErrorCountEQ applies the EQ predicate on the "error_count" field. +func ErrorCountEQ(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldErrorCount, v)) +} + +// ErrorCountNEQ applies the NEQ predicate on the "error_count" field. +func ErrorCountNEQ(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldErrorCount, v)) +} + +// ErrorCountIn applies the In predicate on the "error_count" field. +func ErrorCountIn(vs ...int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldErrorCount, vs...)) +} + +// ErrorCountNotIn applies the NotIn predicate on the "error_count" field. +func ErrorCountNotIn(vs ...int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldErrorCount, vs...)) +} + +// ErrorCountGT applies the GT predicate on the "error_count" field. 
+func ErrorCountGT(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldErrorCount, v)) +} + +// ErrorCountGTE applies the GTE predicate on the "error_count" field. +func ErrorCountGTE(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldErrorCount, v)) +} + +// ErrorCountLT applies the LT predicate on the "error_count" field. +func ErrorCountLT(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldErrorCount, v)) +} + +// ErrorCountLTE applies the LTE predicate on the "error_count" field. +func ErrorCountLTE(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldErrorCount, v)) +} + +// SumLatencyMsEQ applies the EQ predicate on the "sum_latency_ms" field. +func SumLatencyMsEQ(v int64) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldSumLatencyMs, v)) +} + +// SumLatencyMsNEQ applies the NEQ predicate on the "sum_latency_ms" field. +func SumLatencyMsNEQ(v int64) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldSumLatencyMs, v)) +} + +// SumLatencyMsIn applies the In predicate on the "sum_latency_ms" field. +func SumLatencyMsIn(vs ...int64) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldSumLatencyMs, vs...)) +} + +// SumLatencyMsNotIn applies the NotIn predicate on the "sum_latency_ms" field. +func SumLatencyMsNotIn(vs ...int64) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldSumLatencyMs, vs...)) +} + +// SumLatencyMsGT applies the GT predicate on the "sum_latency_ms" field. 
+func SumLatencyMsGT(v int64) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldSumLatencyMs, v)) +} + +// SumLatencyMsGTE applies the GTE predicate on the "sum_latency_ms" field. +func SumLatencyMsGTE(v int64) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldSumLatencyMs, v)) +} + +// SumLatencyMsLT applies the LT predicate on the "sum_latency_ms" field. +func SumLatencyMsLT(v int64) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldSumLatencyMs, v)) +} + +// SumLatencyMsLTE applies the LTE predicate on the "sum_latency_ms" field. +func SumLatencyMsLTE(v int64) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldSumLatencyMs, v)) +} + +// CountLatencyEQ applies the EQ predicate on the "count_latency" field. +func CountLatencyEQ(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldCountLatency, v)) +} + +// CountLatencyNEQ applies the NEQ predicate on the "count_latency" field. +func CountLatencyNEQ(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldCountLatency, v)) +} + +// CountLatencyIn applies the In predicate on the "count_latency" field. +func CountLatencyIn(vs ...int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldCountLatency, vs...)) +} + +// CountLatencyNotIn applies the NotIn predicate on the "count_latency" field. +func CountLatencyNotIn(vs ...int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldCountLatency, vs...)) +} + +// CountLatencyGT applies the GT predicate on the "count_latency" field. 
+func CountLatencyGT(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldCountLatency, v)) +} + +// CountLatencyGTE applies the GTE predicate on the "count_latency" field. +func CountLatencyGTE(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldCountLatency, v)) +} + +// CountLatencyLT applies the LT predicate on the "count_latency" field. +func CountLatencyLT(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldCountLatency, v)) +} + +// CountLatencyLTE applies the LTE predicate on the "count_latency" field. +func CountLatencyLTE(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldCountLatency, v)) +} + +// SumPingLatencyMsEQ applies the EQ predicate on the "sum_ping_latency_ms" field. +func SumPingLatencyMsEQ(v int64) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldSumPingLatencyMs, v)) +} + +// SumPingLatencyMsNEQ applies the NEQ predicate on the "sum_ping_latency_ms" field. +func SumPingLatencyMsNEQ(v int64) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldSumPingLatencyMs, v)) +} + +// SumPingLatencyMsIn applies the In predicate on the "sum_ping_latency_ms" field. +func SumPingLatencyMsIn(vs ...int64) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldSumPingLatencyMs, vs...)) +} + +// SumPingLatencyMsNotIn applies the NotIn predicate on the "sum_ping_latency_ms" field. +func SumPingLatencyMsNotIn(vs ...int64) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldSumPingLatencyMs, vs...)) +} + +// SumPingLatencyMsGT applies the GT predicate on the "sum_ping_latency_ms" field. 
+func SumPingLatencyMsGT(v int64) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldSumPingLatencyMs, v)) +} + +// SumPingLatencyMsGTE applies the GTE predicate on the "sum_ping_latency_ms" field. +func SumPingLatencyMsGTE(v int64) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldSumPingLatencyMs, v)) +} + +// SumPingLatencyMsLT applies the LT predicate on the "sum_ping_latency_ms" field. +func SumPingLatencyMsLT(v int64) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldSumPingLatencyMs, v)) +} + +// SumPingLatencyMsLTE applies the LTE predicate on the "sum_ping_latency_ms" field. +func SumPingLatencyMsLTE(v int64) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldSumPingLatencyMs, v)) +} + +// CountPingLatencyEQ applies the EQ predicate on the "count_ping_latency" field. +func CountPingLatencyEQ(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldCountPingLatency, v)) +} + +// CountPingLatencyNEQ applies the NEQ predicate on the "count_ping_latency" field. +func CountPingLatencyNEQ(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldCountPingLatency, v)) +} + +// CountPingLatencyIn applies the In predicate on the "count_ping_latency" field. +func CountPingLatencyIn(vs ...int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldCountPingLatency, vs...)) +} + +// CountPingLatencyNotIn applies the NotIn predicate on the "count_ping_latency" field. +func CountPingLatencyNotIn(vs ...int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldCountPingLatency, vs...)) +} + +// CountPingLatencyGT applies the GT predicate on the "count_ping_latency" field. 
+func CountPingLatencyGT(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldCountPingLatency, v)) +} + +// CountPingLatencyGTE applies the GTE predicate on the "count_ping_latency" field. +func CountPingLatencyGTE(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldCountPingLatency, v)) +} + +// CountPingLatencyLT applies the LT predicate on the "count_ping_latency" field. +func CountPingLatencyLT(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldCountPingLatency, v)) +} + +// CountPingLatencyLTE applies the LTE predicate on the "count_ping_latency" field. +func CountPingLatencyLTE(v int) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldCountPingLatency, v)) +} + +// ComputedAtEQ applies the EQ predicate on the "computed_at" field. +func ComputedAtEQ(v time.Time) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldEQ(FieldComputedAt, v)) +} + +// ComputedAtNEQ applies the NEQ predicate on the "computed_at" field. +func ComputedAtNEQ(v time.Time) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNEQ(FieldComputedAt, v)) +} + +// ComputedAtIn applies the In predicate on the "computed_at" field. +func ComputedAtIn(vs ...time.Time) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldIn(FieldComputedAt, vs...)) +} + +// ComputedAtNotIn applies the NotIn predicate on the "computed_at" field. +func ComputedAtNotIn(vs ...time.Time) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldNotIn(FieldComputedAt, vs...)) +} + +// ComputedAtGT applies the GT predicate on the "computed_at" field. 
+func ComputedAtGT(v time.Time) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldGT(FieldComputedAt, v)) +} + +// ComputedAtGTE applies the GTE predicate on the "computed_at" field. +func ComputedAtGTE(v time.Time) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldGTE(FieldComputedAt, v)) +} + +// ComputedAtLT applies the LT predicate on the "computed_at" field. +func ComputedAtLT(v time.Time) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldLT(FieldComputedAt, v)) +} + +// ComputedAtLTE applies the LTE predicate on the "computed_at" field. +func ComputedAtLTE(v time.Time) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.FieldLTE(FieldComputedAt, v)) +} + +// HasMonitor applies the HasEdge predicate on the "monitor" edge. +func HasMonitor() predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, MonitorTable, MonitorColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasMonitorWith applies the HasEdge predicate on the "monitor" edge with a given conditions (other predicates). +func HasMonitorWith(preds ...predicate.ChannelMonitor) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(func(s *sql.Selector) { + step := newMonitorStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.ChannelMonitorDailyRollup) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. 
+func Or(predicates ...predicate.ChannelMonitorDailyRollup) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.ChannelMonitorDailyRollup) predicate.ChannelMonitorDailyRollup { + return predicate.ChannelMonitorDailyRollup(sql.NotPredicates(p)) +} diff --git a/backend/ent/channelmonitordailyrollup_create.go b/backend/ent/channelmonitordailyrollup_create.go new file mode 100644 index 00000000..c4850751 --- /dev/null +++ b/backend/ent/channelmonitordailyrollup_create.go @@ -0,0 +1,1593 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/channelmonitor" + "github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup" +) + +// ChannelMonitorDailyRollupCreate is the builder for creating a ChannelMonitorDailyRollup entity. +type ChannelMonitorDailyRollupCreate struct { + config + mutation *ChannelMonitorDailyRollupMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetDeletedAt sets the "deleted_at" field. +func (_c *ChannelMonitorDailyRollupCreate) SetDeletedAt(v time.Time) *ChannelMonitorDailyRollupCreate { + _c.mutation.SetDeletedAt(v) + return _c +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_c *ChannelMonitorDailyRollupCreate) SetNillableDeletedAt(v *time.Time) *ChannelMonitorDailyRollupCreate { + if v != nil { + _c.SetDeletedAt(*v) + } + return _c +} + +// SetMonitorID sets the "monitor_id" field. +func (_c *ChannelMonitorDailyRollupCreate) SetMonitorID(v int64) *ChannelMonitorDailyRollupCreate { + _c.mutation.SetMonitorID(v) + return _c +} + +// SetModel sets the "model" field. 
+func (_c *ChannelMonitorDailyRollupCreate) SetModel(v string) *ChannelMonitorDailyRollupCreate { + _c.mutation.SetModel(v) + return _c +} + +// SetBucketDate sets the "bucket_date" field. +func (_c *ChannelMonitorDailyRollupCreate) SetBucketDate(v time.Time) *ChannelMonitorDailyRollupCreate { + _c.mutation.SetBucketDate(v) + return _c +} + +// SetTotalChecks sets the "total_checks" field. +func (_c *ChannelMonitorDailyRollupCreate) SetTotalChecks(v int) *ChannelMonitorDailyRollupCreate { + _c.mutation.SetTotalChecks(v) + return _c +} + +// SetNillableTotalChecks sets the "total_checks" field if the given value is not nil. +func (_c *ChannelMonitorDailyRollupCreate) SetNillableTotalChecks(v *int) *ChannelMonitorDailyRollupCreate { + if v != nil { + _c.SetTotalChecks(*v) + } + return _c +} + +// SetOkCount sets the "ok_count" field. +func (_c *ChannelMonitorDailyRollupCreate) SetOkCount(v int) *ChannelMonitorDailyRollupCreate { + _c.mutation.SetOkCount(v) + return _c +} + +// SetNillableOkCount sets the "ok_count" field if the given value is not nil. +func (_c *ChannelMonitorDailyRollupCreate) SetNillableOkCount(v *int) *ChannelMonitorDailyRollupCreate { + if v != nil { + _c.SetOkCount(*v) + } + return _c +} + +// SetOperationalCount sets the "operational_count" field. +func (_c *ChannelMonitorDailyRollupCreate) SetOperationalCount(v int) *ChannelMonitorDailyRollupCreate { + _c.mutation.SetOperationalCount(v) + return _c +} + +// SetNillableOperationalCount sets the "operational_count" field if the given value is not nil. +func (_c *ChannelMonitorDailyRollupCreate) SetNillableOperationalCount(v *int) *ChannelMonitorDailyRollupCreate { + if v != nil { + _c.SetOperationalCount(*v) + } + return _c +} + +// SetDegradedCount sets the "degraded_count" field. 
+func (_c *ChannelMonitorDailyRollupCreate) SetDegradedCount(v int) *ChannelMonitorDailyRollupCreate { + _c.mutation.SetDegradedCount(v) + return _c +} + +// SetNillableDegradedCount sets the "degraded_count" field if the given value is not nil. +func (_c *ChannelMonitorDailyRollupCreate) SetNillableDegradedCount(v *int) *ChannelMonitorDailyRollupCreate { + if v != nil { + _c.SetDegradedCount(*v) + } + return _c +} + +// SetFailedCount sets the "failed_count" field. +func (_c *ChannelMonitorDailyRollupCreate) SetFailedCount(v int) *ChannelMonitorDailyRollupCreate { + _c.mutation.SetFailedCount(v) + return _c +} + +// SetNillableFailedCount sets the "failed_count" field if the given value is not nil. +func (_c *ChannelMonitorDailyRollupCreate) SetNillableFailedCount(v *int) *ChannelMonitorDailyRollupCreate { + if v != nil { + _c.SetFailedCount(*v) + } + return _c +} + +// SetErrorCount sets the "error_count" field. +func (_c *ChannelMonitorDailyRollupCreate) SetErrorCount(v int) *ChannelMonitorDailyRollupCreate { + _c.mutation.SetErrorCount(v) + return _c +} + +// SetNillableErrorCount sets the "error_count" field if the given value is not nil. +func (_c *ChannelMonitorDailyRollupCreate) SetNillableErrorCount(v *int) *ChannelMonitorDailyRollupCreate { + if v != nil { + _c.SetErrorCount(*v) + } + return _c +} + +// SetSumLatencyMs sets the "sum_latency_ms" field. +func (_c *ChannelMonitorDailyRollupCreate) SetSumLatencyMs(v int64) *ChannelMonitorDailyRollupCreate { + _c.mutation.SetSumLatencyMs(v) + return _c +} + +// SetNillableSumLatencyMs sets the "sum_latency_ms" field if the given value is not nil. +func (_c *ChannelMonitorDailyRollupCreate) SetNillableSumLatencyMs(v *int64) *ChannelMonitorDailyRollupCreate { + if v != nil { + _c.SetSumLatencyMs(*v) + } + return _c +} + +// SetCountLatency sets the "count_latency" field. 
+func (_c *ChannelMonitorDailyRollupCreate) SetCountLatency(v int) *ChannelMonitorDailyRollupCreate { + _c.mutation.SetCountLatency(v) + return _c +} + +// SetNillableCountLatency sets the "count_latency" field if the given value is not nil. +func (_c *ChannelMonitorDailyRollupCreate) SetNillableCountLatency(v *int) *ChannelMonitorDailyRollupCreate { + if v != nil { + _c.SetCountLatency(*v) + } + return _c +} + +// SetSumPingLatencyMs sets the "sum_ping_latency_ms" field. +func (_c *ChannelMonitorDailyRollupCreate) SetSumPingLatencyMs(v int64) *ChannelMonitorDailyRollupCreate { + _c.mutation.SetSumPingLatencyMs(v) + return _c +} + +// SetNillableSumPingLatencyMs sets the "sum_ping_latency_ms" field if the given value is not nil. +func (_c *ChannelMonitorDailyRollupCreate) SetNillableSumPingLatencyMs(v *int64) *ChannelMonitorDailyRollupCreate { + if v != nil { + _c.SetSumPingLatencyMs(*v) + } + return _c +} + +// SetCountPingLatency sets the "count_ping_latency" field. +func (_c *ChannelMonitorDailyRollupCreate) SetCountPingLatency(v int) *ChannelMonitorDailyRollupCreate { + _c.mutation.SetCountPingLatency(v) + return _c +} + +// SetNillableCountPingLatency sets the "count_ping_latency" field if the given value is not nil. +func (_c *ChannelMonitorDailyRollupCreate) SetNillableCountPingLatency(v *int) *ChannelMonitorDailyRollupCreate { + if v != nil { + _c.SetCountPingLatency(*v) + } + return _c +} + +// SetComputedAt sets the "computed_at" field. +func (_c *ChannelMonitorDailyRollupCreate) SetComputedAt(v time.Time) *ChannelMonitorDailyRollupCreate { + _c.mutation.SetComputedAt(v) + return _c +} + +// SetNillableComputedAt sets the "computed_at" field if the given value is not nil. +func (_c *ChannelMonitorDailyRollupCreate) SetNillableComputedAt(v *time.Time) *ChannelMonitorDailyRollupCreate { + if v != nil { + _c.SetComputedAt(*v) + } + return _c +} + +// SetMonitor sets the "monitor" edge to the ChannelMonitor entity. 
+func (_c *ChannelMonitorDailyRollupCreate) SetMonitor(v *ChannelMonitor) *ChannelMonitorDailyRollupCreate { + return _c.SetMonitorID(v.ID) +} + +// Mutation returns the ChannelMonitorDailyRollupMutation object of the builder. +func (_c *ChannelMonitorDailyRollupCreate) Mutation() *ChannelMonitorDailyRollupMutation { + return _c.mutation +} + +// Save creates the ChannelMonitorDailyRollup in the database. +func (_c *ChannelMonitorDailyRollupCreate) Save(ctx context.Context) (*ChannelMonitorDailyRollup, error) { + if err := _c.defaults(); err != nil { + return nil, err + } + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *ChannelMonitorDailyRollupCreate) SaveX(ctx context.Context) *ChannelMonitorDailyRollup { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *ChannelMonitorDailyRollupCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *ChannelMonitorDailyRollupCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
+func (_c *ChannelMonitorDailyRollupCreate) defaults() error { + if _, ok := _c.mutation.TotalChecks(); !ok { + v := channelmonitordailyrollup.DefaultTotalChecks + _c.mutation.SetTotalChecks(v) + } + if _, ok := _c.mutation.OkCount(); !ok { + v := channelmonitordailyrollup.DefaultOkCount + _c.mutation.SetOkCount(v) + } + if _, ok := _c.mutation.OperationalCount(); !ok { + v := channelmonitordailyrollup.DefaultOperationalCount + _c.mutation.SetOperationalCount(v) + } + if _, ok := _c.mutation.DegradedCount(); !ok { + v := channelmonitordailyrollup.DefaultDegradedCount + _c.mutation.SetDegradedCount(v) + } + if _, ok := _c.mutation.FailedCount(); !ok { + v := channelmonitordailyrollup.DefaultFailedCount + _c.mutation.SetFailedCount(v) + } + if _, ok := _c.mutation.ErrorCount(); !ok { + v := channelmonitordailyrollup.DefaultErrorCount + _c.mutation.SetErrorCount(v) + } + if _, ok := _c.mutation.SumLatencyMs(); !ok { + v := channelmonitordailyrollup.DefaultSumLatencyMs + _c.mutation.SetSumLatencyMs(v) + } + if _, ok := _c.mutation.CountLatency(); !ok { + v := channelmonitordailyrollup.DefaultCountLatency + _c.mutation.SetCountLatency(v) + } + if _, ok := _c.mutation.SumPingLatencyMs(); !ok { + v := channelmonitordailyrollup.DefaultSumPingLatencyMs + _c.mutation.SetSumPingLatencyMs(v) + } + if _, ok := _c.mutation.CountPingLatency(); !ok { + v := channelmonitordailyrollup.DefaultCountPingLatency + _c.mutation.SetCountPingLatency(v) + } + if _, ok := _c.mutation.ComputedAt(); !ok { + if channelmonitordailyrollup.DefaultComputedAt == nil { + return fmt.Errorf("ent: uninitialized channelmonitordailyrollup.DefaultComputedAt (forgotten import ent/runtime?)") + } + v := channelmonitordailyrollup.DefaultComputedAt() + _c.mutation.SetComputedAt(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. 
+func (_c *ChannelMonitorDailyRollupCreate) check() error { + if _, ok := _c.mutation.MonitorID(); !ok { + return &ValidationError{Name: "monitor_id", err: errors.New(`ent: missing required field "ChannelMonitorDailyRollup.monitor_id"`)} + } + if _, ok := _c.mutation.Model(); !ok { + return &ValidationError{Name: "model", err: errors.New(`ent: missing required field "ChannelMonitorDailyRollup.model"`)} + } + if v, ok := _c.mutation.Model(); ok { + if err := channelmonitordailyrollup.ModelValidator(v); err != nil { + return &ValidationError{Name: "model", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorDailyRollup.model": %w`, err)} + } + } + if _, ok := _c.mutation.BucketDate(); !ok { + return &ValidationError{Name: "bucket_date", err: errors.New(`ent: missing required field "ChannelMonitorDailyRollup.bucket_date"`)} + } + if _, ok := _c.mutation.TotalChecks(); !ok { + return &ValidationError{Name: "total_checks", err: errors.New(`ent: missing required field "ChannelMonitorDailyRollup.total_checks"`)} + } + if _, ok := _c.mutation.OkCount(); !ok { + return &ValidationError{Name: "ok_count", err: errors.New(`ent: missing required field "ChannelMonitorDailyRollup.ok_count"`)} + } + if _, ok := _c.mutation.OperationalCount(); !ok { + return &ValidationError{Name: "operational_count", err: errors.New(`ent: missing required field "ChannelMonitorDailyRollup.operational_count"`)} + } + if _, ok := _c.mutation.DegradedCount(); !ok { + return &ValidationError{Name: "degraded_count", err: errors.New(`ent: missing required field "ChannelMonitorDailyRollup.degraded_count"`)} + } + if _, ok := _c.mutation.FailedCount(); !ok { + return &ValidationError{Name: "failed_count", err: errors.New(`ent: missing required field "ChannelMonitorDailyRollup.failed_count"`)} + } + if _, ok := _c.mutation.ErrorCount(); !ok { + return &ValidationError{Name: "error_count", err: errors.New(`ent: missing required field "ChannelMonitorDailyRollup.error_count"`)} + } + if _, ok := 
_c.mutation.SumLatencyMs(); !ok { + return &ValidationError{Name: "sum_latency_ms", err: errors.New(`ent: missing required field "ChannelMonitorDailyRollup.sum_latency_ms"`)} + } + if _, ok := _c.mutation.CountLatency(); !ok { + return &ValidationError{Name: "count_latency", err: errors.New(`ent: missing required field "ChannelMonitorDailyRollup.count_latency"`)} + } + if _, ok := _c.mutation.SumPingLatencyMs(); !ok { + return &ValidationError{Name: "sum_ping_latency_ms", err: errors.New(`ent: missing required field "ChannelMonitorDailyRollup.sum_ping_latency_ms"`)} + } + if _, ok := _c.mutation.CountPingLatency(); !ok { + return &ValidationError{Name: "count_ping_latency", err: errors.New(`ent: missing required field "ChannelMonitorDailyRollup.count_ping_latency"`)} + } + if _, ok := _c.mutation.ComputedAt(); !ok { + return &ValidationError{Name: "computed_at", err: errors.New(`ent: missing required field "ChannelMonitorDailyRollup.computed_at"`)} + } + if len(_c.mutation.MonitorIDs()) == 0 { + return &ValidationError{Name: "monitor", err: errors.New(`ent: missing required edge "ChannelMonitorDailyRollup.monitor"`)} + } + return nil +} + +func (_c *ChannelMonitorDailyRollupCreate) sqlSave(ctx context.Context) (*ChannelMonitorDailyRollup, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int64(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *ChannelMonitorDailyRollupCreate) createSpec() (*ChannelMonitorDailyRollup, *sqlgraph.CreateSpec) { + var ( + _node = &ChannelMonitorDailyRollup{config: _c.config} + _spec = sqlgraph.NewCreateSpec(channelmonitordailyrollup.Table, sqlgraph.NewFieldSpec(channelmonitordailyrollup.FieldID, field.TypeInt64)) + 
) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.DeletedAt(); ok { + _spec.SetField(channelmonitordailyrollup.FieldDeletedAt, field.TypeTime, value) + _node.DeletedAt = &value + } + if value, ok := _c.mutation.Model(); ok { + _spec.SetField(channelmonitordailyrollup.FieldModel, field.TypeString, value) + _node.Model = value + } + if value, ok := _c.mutation.BucketDate(); ok { + _spec.SetField(channelmonitordailyrollup.FieldBucketDate, field.TypeTime, value) + _node.BucketDate = value + } + if value, ok := _c.mutation.TotalChecks(); ok { + _spec.SetField(channelmonitordailyrollup.FieldTotalChecks, field.TypeInt, value) + _node.TotalChecks = value + } + if value, ok := _c.mutation.OkCount(); ok { + _spec.SetField(channelmonitordailyrollup.FieldOkCount, field.TypeInt, value) + _node.OkCount = value + } + if value, ok := _c.mutation.OperationalCount(); ok { + _spec.SetField(channelmonitordailyrollup.FieldOperationalCount, field.TypeInt, value) + _node.OperationalCount = value + } + if value, ok := _c.mutation.DegradedCount(); ok { + _spec.SetField(channelmonitordailyrollup.FieldDegradedCount, field.TypeInt, value) + _node.DegradedCount = value + } + if value, ok := _c.mutation.FailedCount(); ok { + _spec.SetField(channelmonitordailyrollup.FieldFailedCount, field.TypeInt, value) + _node.FailedCount = value + } + if value, ok := _c.mutation.ErrorCount(); ok { + _spec.SetField(channelmonitordailyrollup.FieldErrorCount, field.TypeInt, value) + _node.ErrorCount = value + } + if value, ok := _c.mutation.SumLatencyMs(); ok { + _spec.SetField(channelmonitordailyrollup.FieldSumLatencyMs, field.TypeInt64, value) + _node.SumLatencyMs = value + } + if value, ok := _c.mutation.CountLatency(); ok { + _spec.SetField(channelmonitordailyrollup.FieldCountLatency, field.TypeInt, value) + _node.CountLatency = value + } + if value, ok := _c.mutation.SumPingLatencyMs(); ok { + _spec.SetField(channelmonitordailyrollup.FieldSumPingLatencyMs, field.TypeInt64, value) + 
_node.SumPingLatencyMs = value + } + if value, ok := _c.mutation.CountPingLatency(); ok { + _spec.SetField(channelmonitordailyrollup.FieldCountPingLatency, field.TypeInt, value) + _node.CountPingLatency = value + } + if value, ok := _c.mutation.ComputedAt(); ok { + _spec.SetField(channelmonitordailyrollup.FieldComputedAt, field.TypeTime, value) + _node.ComputedAt = value + } + if nodes := _c.mutation.MonitorIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: channelmonitordailyrollup.MonitorTable, + Columns: []string{channelmonitordailyrollup.MonitorColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.MonitorID = nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.ChannelMonitorDailyRollup.Create(). +// SetDeletedAt(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.ChannelMonitorDailyRollupUpsert) { +// SetDeletedAt(v+v). +// }). +// Exec(ctx) +func (_c *ChannelMonitorDailyRollupCreate) OnConflict(opts ...sql.ConflictOption) *ChannelMonitorDailyRollupUpsertOne { + _c.conflict = opts + return &ChannelMonitorDailyRollupUpsertOne{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.ChannelMonitorDailyRollup.Create(). +// OnConflict(sql.ConflictColumns(columns...)). 
+// Exec(ctx) +func (_c *ChannelMonitorDailyRollupCreate) OnConflictColumns(columns ...string) *ChannelMonitorDailyRollupUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &ChannelMonitorDailyRollupUpsertOne{ + create: _c, + } +} + +type ( + // ChannelMonitorDailyRollupUpsertOne is the builder for "upsert"-ing + // one ChannelMonitorDailyRollup node. + ChannelMonitorDailyRollupUpsertOne struct { + create *ChannelMonitorDailyRollupCreate + } + + // ChannelMonitorDailyRollupUpsert is the "OnConflict" setter. + ChannelMonitorDailyRollupUpsert struct { + *sql.UpdateSet + } +) + +// SetDeletedAt sets the "deleted_at" field. +func (u *ChannelMonitorDailyRollupUpsert) SetDeletedAt(v time.Time) *ChannelMonitorDailyRollupUpsert { + u.Set(channelmonitordailyrollup.FieldDeletedAt, v) + return u +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsert) UpdateDeletedAt() *ChannelMonitorDailyRollupUpsert { + u.SetExcluded(channelmonitordailyrollup.FieldDeletedAt) + return u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *ChannelMonitorDailyRollupUpsert) ClearDeletedAt() *ChannelMonitorDailyRollupUpsert { + u.SetNull(channelmonitordailyrollup.FieldDeletedAt) + return u +} + +// SetMonitorID sets the "monitor_id" field. +func (u *ChannelMonitorDailyRollupUpsert) SetMonitorID(v int64) *ChannelMonitorDailyRollupUpsert { + u.Set(channelmonitordailyrollup.FieldMonitorID, v) + return u +} + +// UpdateMonitorID sets the "monitor_id" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsert) UpdateMonitorID() *ChannelMonitorDailyRollupUpsert { + u.SetExcluded(channelmonitordailyrollup.FieldMonitorID) + return u +} + +// SetModel sets the "model" field. 
+func (u *ChannelMonitorDailyRollupUpsert) SetModel(v string) *ChannelMonitorDailyRollupUpsert { + u.Set(channelmonitordailyrollup.FieldModel, v) + return u +} + +// UpdateModel sets the "model" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsert) UpdateModel() *ChannelMonitorDailyRollupUpsert { + u.SetExcluded(channelmonitordailyrollup.FieldModel) + return u +} + +// SetBucketDate sets the "bucket_date" field. +func (u *ChannelMonitorDailyRollupUpsert) SetBucketDate(v time.Time) *ChannelMonitorDailyRollupUpsert { + u.Set(channelmonitordailyrollup.FieldBucketDate, v) + return u +} + +// UpdateBucketDate sets the "bucket_date" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsert) UpdateBucketDate() *ChannelMonitorDailyRollupUpsert { + u.SetExcluded(channelmonitordailyrollup.FieldBucketDate) + return u +} + +// SetTotalChecks sets the "total_checks" field. +func (u *ChannelMonitorDailyRollupUpsert) SetTotalChecks(v int) *ChannelMonitorDailyRollupUpsert { + u.Set(channelmonitordailyrollup.FieldTotalChecks, v) + return u +} + +// UpdateTotalChecks sets the "total_checks" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsert) UpdateTotalChecks() *ChannelMonitorDailyRollupUpsert { + u.SetExcluded(channelmonitordailyrollup.FieldTotalChecks) + return u +} + +// AddTotalChecks adds v to the "total_checks" field. +func (u *ChannelMonitorDailyRollupUpsert) AddTotalChecks(v int) *ChannelMonitorDailyRollupUpsert { + u.Add(channelmonitordailyrollup.FieldTotalChecks, v) + return u +} + +// SetOkCount sets the "ok_count" field. +func (u *ChannelMonitorDailyRollupUpsert) SetOkCount(v int) *ChannelMonitorDailyRollupUpsert { + u.Set(channelmonitordailyrollup.FieldOkCount, v) + return u +} + +// UpdateOkCount sets the "ok_count" field to the value that was provided on create. 
+func (u *ChannelMonitorDailyRollupUpsert) UpdateOkCount() *ChannelMonitorDailyRollupUpsert { + u.SetExcluded(channelmonitordailyrollup.FieldOkCount) + return u +} + +// AddOkCount adds v to the "ok_count" field. +func (u *ChannelMonitorDailyRollupUpsert) AddOkCount(v int) *ChannelMonitorDailyRollupUpsert { + u.Add(channelmonitordailyrollup.FieldOkCount, v) + return u +} + +// SetOperationalCount sets the "operational_count" field. +func (u *ChannelMonitorDailyRollupUpsert) SetOperationalCount(v int) *ChannelMonitorDailyRollupUpsert { + u.Set(channelmonitordailyrollup.FieldOperationalCount, v) + return u +} + +// UpdateOperationalCount sets the "operational_count" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsert) UpdateOperationalCount() *ChannelMonitorDailyRollupUpsert { + u.SetExcluded(channelmonitordailyrollup.FieldOperationalCount) + return u +} + +// AddOperationalCount adds v to the "operational_count" field. +func (u *ChannelMonitorDailyRollupUpsert) AddOperationalCount(v int) *ChannelMonitorDailyRollupUpsert { + u.Add(channelmonitordailyrollup.FieldOperationalCount, v) + return u +} + +// SetDegradedCount sets the "degraded_count" field. +func (u *ChannelMonitorDailyRollupUpsert) SetDegradedCount(v int) *ChannelMonitorDailyRollupUpsert { + u.Set(channelmonitordailyrollup.FieldDegradedCount, v) + return u +} + +// UpdateDegradedCount sets the "degraded_count" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsert) UpdateDegradedCount() *ChannelMonitorDailyRollupUpsert { + u.SetExcluded(channelmonitordailyrollup.FieldDegradedCount) + return u +} + +// AddDegradedCount adds v to the "degraded_count" field. +func (u *ChannelMonitorDailyRollupUpsert) AddDegradedCount(v int) *ChannelMonitorDailyRollupUpsert { + u.Add(channelmonitordailyrollup.FieldDegradedCount, v) + return u +} + +// SetFailedCount sets the "failed_count" field. 
+func (u *ChannelMonitorDailyRollupUpsert) SetFailedCount(v int) *ChannelMonitorDailyRollupUpsert { + u.Set(channelmonitordailyrollup.FieldFailedCount, v) + return u +} + +// UpdateFailedCount sets the "failed_count" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsert) UpdateFailedCount() *ChannelMonitorDailyRollupUpsert { + u.SetExcluded(channelmonitordailyrollup.FieldFailedCount) + return u +} + +// AddFailedCount adds v to the "failed_count" field. +func (u *ChannelMonitorDailyRollupUpsert) AddFailedCount(v int) *ChannelMonitorDailyRollupUpsert { + u.Add(channelmonitordailyrollup.FieldFailedCount, v) + return u +} + +// SetErrorCount sets the "error_count" field. +func (u *ChannelMonitorDailyRollupUpsert) SetErrorCount(v int) *ChannelMonitorDailyRollupUpsert { + u.Set(channelmonitordailyrollup.FieldErrorCount, v) + return u +} + +// UpdateErrorCount sets the "error_count" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsert) UpdateErrorCount() *ChannelMonitorDailyRollupUpsert { + u.SetExcluded(channelmonitordailyrollup.FieldErrorCount) + return u +} + +// AddErrorCount adds v to the "error_count" field. +func (u *ChannelMonitorDailyRollupUpsert) AddErrorCount(v int) *ChannelMonitorDailyRollupUpsert { + u.Add(channelmonitordailyrollup.FieldErrorCount, v) + return u +} + +// SetSumLatencyMs sets the "sum_latency_ms" field. +func (u *ChannelMonitorDailyRollupUpsert) SetSumLatencyMs(v int64) *ChannelMonitorDailyRollupUpsert { + u.Set(channelmonitordailyrollup.FieldSumLatencyMs, v) + return u +} + +// UpdateSumLatencyMs sets the "sum_latency_ms" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsert) UpdateSumLatencyMs() *ChannelMonitorDailyRollupUpsert { + u.SetExcluded(channelmonitordailyrollup.FieldSumLatencyMs) + return u +} + +// AddSumLatencyMs adds v to the "sum_latency_ms" field. 
+func (u *ChannelMonitorDailyRollupUpsert) AddSumLatencyMs(v int64) *ChannelMonitorDailyRollupUpsert { + u.Add(channelmonitordailyrollup.FieldSumLatencyMs, v) + return u +} + +// SetCountLatency sets the "count_latency" field. +func (u *ChannelMonitorDailyRollupUpsert) SetCountLatency(v int) *ChannelMonitorDailyRollupUpsert { + u.Set(channelmonitordailyrollup.FieldCountLatency, v) + return u +} + +// UpdateCountLatency sets the "count_latency" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsert) UpdateCountLatency() *ChannelMonitorDailyRollupUpsert { + u.SetExcluded(channelmonitordailyrollup.FieldCountLatency) + return u +} + +// AddCountLatency adds v to the "count_latency" field. +func (u *ChannelMonitorDailyRollupUpsert) AddCountLatency(v int) *ChannelMonitorDailyRollupUpsert { + u.Add(channelmonitordailyrollup.FieldCountLatency, v) + return u +} + +// SetSumPingLatencyMs sets the "sum_ping_latency_ms" field. +func (u *ChannelMonitorDailyRollupUpsert) SetSumPingLatencyMs(v int64) *ChannelMonitorDailyRollupUpsert { + u.Set(channelmonitordailyrollup.FieldSumPingLatencyMs, v) + return u +} + +// UpdateSumPingLatencyMs sets the "sum_ping_latency_ms" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsert) UpdateSumPingLatencyMs() *ChannelMonitorDailyRollupUpsert { + u.SetExcluded(channelmonitordailyrollup.FieldSumPingLatencyMs) + return u +} + +// AddSumPingLatencyMs adds v to the "sum_ping_latency_ms" field. +func (u *ChannelMonitorDailyRollupUpsert) AddSumPingLatencyMs(v int64) *ChannelMonitorDailyRollupUpsert { + u.Add(channelmonitordailyrollup.FieldSumPingLatencyMs, v) + return u +} + +// SetCountPingLatency sets the "count_ping_latency" field. 
+func (u *ChannelMonitorDailyRollupUpsert) SetCountPingLatency(v int) *ChannelMonitorDailyRollupUpsert { + u.Set(channelmonitordailyrollup.FieldCountPingLatency, v) + return u +} + +// UpdateCountPingLatency sets the "count_ping_latency" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsert) UpdateCountPingLatency() *ChannelMonitorDailyRollupUpsert { + u.SetExcluded(channelmonitordailyrollup.FieldCountPingLatency) + return u +} + +// AddCountPingLatency adds v to the "count_ping_latency" field. +func (u *ChannelMonitorDailyRollupUpsert) AddCountPingLatency(v int) *ChannelMonitorDailyRollupUpsert { + u.Add(channelmonitordailyrollup.FieldCountPingLatency, v) + return u +} + +// SetComputedAt sets the "computed_at" field. +func (u *ChannelMonitorDailyRollupUpsert) SetComputedAt(v time.Time) *ChannelMonitorDailyRollupUpsert { + u.Set(channelmonitordailyrollup.FieldComputedAt, v) + return u +} + +// UpdateComputedAt sets the "computed_at" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsert) UpdateComputedAt() *ChannelMonitorDailyRollupUpsert { + u.SetExcluded(channelmonitordailyrollup.FieldComputedAt) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.ChannelMonitorDailyRollup.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *ChannelMonitorDailyRollupUpsertOne) UpdateNewValues() *ChannelMonitorDailyRollupUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.ChannelMonitorDailyRollup.Create(). +// OnConflict(sql.ResolveWithIgnore()). 
+// Exec(ctx) +func (u *ChannelMonitorDailyRollupUpsertOne) Ignore() *ChannelMonitorDailyRollupUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *ChannelMonitorDailyRollupUpsertOne) DoNothing() *ChannelMonitorDailyRollupUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the ChannelMonitorDailyRollupCreate.OnConflict +// documentation for more info. +func (u *ChannelMonitorDailyRollupUpsertOne) Update(set func(*ChannelMonitorDailyRollupUpsert)) *ChannelMonitorDailyRollupUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&ChannelMonitorDailyRollupUpsert{UpdateSet: update}) + })) + return u +} + +// SetDeletedAt sets the "deleted_at" field. +func (u *ChannelMonitorDailyRollupUpsertOne) SetDeletedAt(v time.Time) *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.SetDeletedAt(v) + }) +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsertOne) UpdateDeletedAt() *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.UpdateDeletedAt() + }) +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *ChannelMonitorDailyRollupUpsertOne) ClearDeletedAt() *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.ClearDeletedAt() + }) +} + +// SetMonitorID sets the "monitor_id" field. 
+func (u *ChannelMonitorDailyRollupUpsertOne) SetMonitorID(v int64) *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.SetMonitorID(v) + }) +} + +// UpdateMonitorID sets the "monitor_id" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsertOne) UpdateMonitorID() *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.UpdateMonitorID() + }) +} + +// SetModel sets the "model" field. +func (u *ChannelMonitorDailyRollupUpsertOne) SetModel(v string) *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.SetModel(v) + }) +} + +// UpdateModel sets the "model" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsertOne) UpdateModel() *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.UpdateModel() + }) +} + +// SetBucketDate sets the "bucket_date" field. +func (u *ChannelMonitorDailyRollupUpsertOne) SetBucketDate(v time.Time) *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.SetBucketDate(v) + }) +} + +// UpdateBucketDate sets the "bucket_date" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsertOne) UpdateBucketDate() *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.UpdateBucketDate() + }) +} + +// SetTotalChecks sets the "total_checks" field. +func (u *ChannelMonitorDailyRollupUpsertOne) SetTotalChecks(v int) *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.SetTotalChecks(v) + }) +} + +// AddTotalChecks adds v to the "total_checks" field. 
+func (u *ChannelMonitorDailyRollupUpsertOne) AddTotalChecks(v int) *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.AddTotalChecks(v) + }) +} + +// UpdateTotalChecks sets the "total_checks" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsertOne) UpdateTotalChecks() *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.UpdateTotalChecks() + }) +} + +// SetOkCount sets the "ok_count" field. +func (u *ChannelMonitorDailyRollupUpsertOne) SetOkCount(v int) *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.SetOkCount(v) + }) +} + +// AddOkCount adds v to the "ok_count" field. +func (u *ChannelMonitorDailyRollupUpsertOne) AddOkCount(v int) *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.AddOkCount(v) + }) +} + +// UpdateOkCount sets the "ok_count" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsertOne) UpdateOkCount() *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.UpdateOkCount() + }) +} + +// SetOperationalCount sets the "operational_count" field. +func (u *ChannelMonitorDailyRollupUpsertOne) SetOperationalCount(v int) *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.SetOperationalCount(v) + }) +} + +// AddOperationalCount adds v to the "operational_count" field. +func (u *ChannelMonitorDailyRollupUpsertOne) AddOperationalCount(v int) *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.AddOperationalCount(v) + }) +} + +// UpdateOperationalCount sets the "operational_count" field to the value that was provided on create. 
+func (u *ChannelMonitorDailyRollupUpsertOne) UpdateOperationalCount() *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.UpdateOperationalCount() + }) +} + +// SetDegradedCount sets the "degraded_count" field. +func (u *ChannelMonitorDailyRollupUpsertOne) SetDegradedCount(v int) *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.SetDegradedCount(v) + }) +} + +// AddDegradedCount adds v to the "degraded_count" field. +func (u *ChannelMonitorDailyRollupUpsertOne) AddDegradedCount(v int) *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.AddDegradedCount(v) + }) +} + +// UpdateDegradedCount sets the "degraded_count" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsertOne) UpdateDegradedCount() *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.UpdateDegradedCount() + }) +} + +// SetFailedCount sets the "failed_count" field. +func (u *ChannelMonitorDailyRollupUpsertOne) SetFailedCount(v int) *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.SetFailedCount(v) + }) +} + +// AddFailedCount adds v to the "failed_count" field. +func (u *ChannelMonitorDailyRollupUpsertOne) AddFailedCount(v int) *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.AddFailedCount(v) + }) +} + +// UpdateFailedCount sets the "failed_count" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsertOne) UpdateFailedCount() *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.UpdateFailedCount() + }) +} + +// SetErrorCount sets the "error_count" field. 
+func (u *ChannelMonitorDailyRollupUpsertOne) SetErrorCount(v int) *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.SetErrorCount(v) + }) +} + +// AddErrorCount adds v to the "error_count" field. +func (u *ChannelMonitorDailyRollupUpsertOne) AddErrorCount(v int) *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.AddErrorCount(v) + }) +} + +// UpdateErrorCount sets the "error_count" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsertOne) UpdateErrorCount() *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.UpdateErrorCount() + }) +} + +// SetSumLatencyMs sets the "sum_latency_ms" field. +func (u *ChannelMonitorDailyRollupUpsertOne) SetSumLatencyMs(v int64) *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.SetSumLatencyMs(v) + }) +} + +// AddSumLatencyMs adds v to the "sum_latency_ms" field. +func (u *ChannelMonitorDailyRollupUpsertOne) AddSumLatencyMs(v int64) *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.AddSumLatencyMs(v) + }) +} + +// UpdateSumLatencyMs sets the "sum_latency_ms" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsertOne) UpdateSumLatencyMs() *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.UpdateSumLatencyMs() + }) +} + +// SetCountLatency sets the "count_latency" field. +func (u *ChannelMonitorDailyRollupUpsertOne) SetCountLatency(v int) *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.SetCountLatency(v) + }) +} + +// AddCountLatency adds v to the "count_latency" field. 
+func (u *ChannelMonitorDailyRollupUpsertOne) AddCountLatency(v int) *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.AddCountLatency(v) + }) +} + +// UpdateCountLatency sets the "count_latency" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsertOne) UpdateCountLatency() *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.UpdateCountLatency() + }) +} + +// SetSumPingLatencyMs sets the "sum_ping_latency_ms" field. +func (u *ChannelMonitorDailyRollupUpsertOne) SetSumPingLatencyMs(v int64) *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.SetSumPingLatencyMs(v) + }) +} + +// AddSumPingLatencyMs adds v to the "sum_ping_latency_ms" field. +func (u *ChannelMonitorDailyRollupUpsertOne) AddSumPingLatencyMs(v int64) *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.AddSumPingLatencyMs(v) + }) +} + +// UpdateSumPingLatencyMs sets the "sum_ping_latency_ms" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsertOne) UpdateSumPingLatencyMs() *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.UpdateSumPingLatencyMs() + }) +} + +// SetCountPingLatency sets the "count_ping_latency" field. +func (u *ChannelMonitorDailyRollupUpsertOne) SetCountPingLatency(v int) *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.SetCountPingLatency(v) + }) +} + +// AddCountPingLatency adds v to the "count_ping_latency" field. 
+func (u *ChannelMonitorDailyRollupUpsertOne) AddCountPingLatency(v int) *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.AddCountPingLatency(v) + }) +} + +// UpdateCountPingLatency sets the "count_ping_latency" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsertOne) UpdateCountPingLatency() *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.UpdateCountPingLatency() + }) +} + +// SetComputedAt sets the "computed_at" field. +func (u *ChannelMonitorDailyRollupUpsertOne) SetComputedAt(v time.Time) *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.SetComputedAt(v) + }) +} + +// UpdateComputedAt sets the "computed_at" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsertOne) UpdateComputedAt() *ChannelMonitorDailyRollupUpsertOne { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.UpdateComputedAt() + }) +} + +// Exec executes the query. +func (u *ChannelMonitorDailyRollupUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for ChannelMonitorDailyRollupCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *ChannelMonitorDailyRollupUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. +func (u *ChannelMonitorDailyRollupUpsertOne) ID(ctx context.Context) (id int64, err error) { + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. 
+func (u *ChannelMonitorDailyRollupUpsertOne) IDX(ctx context.Context) int64 { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// ChannelMonitorDailyRollupCreateBulk is the builder for creating many ChannelMonitorDailyRollup entities in bulk. +type ChannelMonitorDailyRollupCreateBulk struct { + config + err error + builders []*ChannelMonitorDailyRollupCreate + conflict []sql.ConflictOption +} + +// Save creates the ChannelMonitorDailyRollup entities in the database. +func (_c *ChannelMonitorDailyRollupCreateBulk) Save(ctx context.Context) ([]*ChannelMonitorDailyRollup, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*ChannelMonitorDailyRollup, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*ChannelMonitorDailyRollupMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = _c.conflict + // Invoke the actual operation on the latest mutation in the chain. 
+ if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int64(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (_c *ChannelMonitorDailyRollupCreateBulk) SaveX(ctx context.Context) []*ChannelMonitorDailyRollup { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *ChannelMonitorDailyRollupCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *ChannelMonitorDailyRollupCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.ChannelMonitorDailyRollup.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.ChannelMonitorDailyRollupUpsert) { +// SetDeletedAt(v+v). +// }). 
+// Exec(ctx) +func (_c *ChannelMonitorDailyRollupCreateBulk) OnConflict(opts ...sql.ConflictOption) *ChannelMonitorDailyRollupUpsertBulk { + _c.conflict = opts + return &ChannelMonitorDailyRollupUpsertBulk{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.ChannelMonitorDailyRollup.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *ChannelMonitorDailyRollupCreateBulk) OnConflictColumns(columns ...string) *ChannelMonitorDailyRollupUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &ChannelMonitorDailyRollupUpsertBulk{ + create: _c, + } +} + +// ChannelMonitorDailyRollupUpsertBulk is the builder for "upsert"-ing +// a bulk of ChannelMonitorDailyRollup nodes. +type ChannelMonitorDailyRollupUpsertBulk struct { + create *ChannelMonitorDailyRollupCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.ChannelMonitorDailyRollup.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *ChannelMonitorDailyRollupUpsertBulk) UpdateNewValues() *ChannelMonitorDailyRollupUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.ChannelMonitorDailyRollup.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *ChannelMonitorDailyRollupUpsertBulk) Ignore() *ChannelMonitorDailyRollupUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. 
+func (u *ChannelMonitorDailyRollupUpsertBulk) DoNothing() *ChannelMonitorDailyRollupUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the ChannelMonitorDailyRollupCreateBulk.OnConflict +// documentation for more info. +func (u *ChannelMonitorDailyRollupUpsertBulk) Update(set func(*ChannelMonitorDailyRollupUpsert)) *ChannelMonitorDailyRollupUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&ChannelMonitorDailyRollupUpsert{UpdateSet: update}) + })) + return u +} + +// SetDeletedAt sets the "deleted_at" field. +func (u *ChannelMonitorDailyRollupUpsertBulk) SetDeletedAt(v time.Time) *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.SetDeletedAt(v) + }) +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsertBulk) UpdateDeletedAt() *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.UpdateDeletedAt() + }) +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *ChannelMonitorDailyRollupUpsertBulk) ClearDeletedAt() *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.ClearDeletedAt() + }) +} + +// SetMonitorID sets the "monitor_id" field. +func (u *ChannelMonitorDailyRollupUpsertBulk) SetMonitorID(v int64) *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.SetMonitorID(v) + }) +} + +// UpdateMonitorID sets the "monitor_id" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsertBulk) UpdateMonitorID() *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.UpdateMonitorID() + }) +} + +// SetModel sets the "model" field. 
+func (u *ChannelMonitorDailyRollupUpsertBulk) SetModel(v string) *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.SetModel(v) + }) +} + +// UpdateModel sets the "model" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsertBulk) UpdateModel() *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.UpdateModel() + }) +} + +// SetBucketDate sets the "bucket_date" field. +func (u *ChannelMonitorDailyRollupUpsertBulk) SetBucketDate(v time.Time) *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.SetBucketDate(v) + }) +} + +// UpdateBucketDate sets the "bucket_date" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsertBulk) UpdateBucketDate() *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.UpdateBucketDate() + }) +} + +// SetTotalChecks sets the "total_checks" field. +func (u *ChannelMonitorDailyRollupUpsertBulk) SetTotalChecks(v int) *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.SetTotalChecks(v) + }) +} + +// AddTotalChecks adds v to the "total_checks" field. +func (u *ChannelMonitorDailyRollupUpsertBulk) AddTotalChecks(v int) *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.AddTotalChecks(v) + }) +} + +// UpdateTotalChecks sets the "total_checks" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsertBulk) UpdateTotalChecks() *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.UpdateTotalChecks() + }) +} + +// SetOkCount sets the "ok_count" field. 
+func (u *ChannelMonitorDailyRollupUpsertBulk) SetOkCount(v int) *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.SetOkCount(v) + }) +} + +// AddOkCount adds v to the "ok_count" field. +func (u *ChannelMonitorDailyRollupUpsertBulk) AddOkCount(v int) *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.AddOkCount(v) + }) +} + +// UpdateOkCount sets the "ok_count" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsertBulk) UpdateOkCount() *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.UpdateOkCount() + }) +} + +// SetOperationalCount sets the "operational_count" field. +func (u *ChannelMonitorDailyRollupUpsertBulk) SetOperationalCount(v int) *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.SetOperationalCount(v) + }) +} + +// AddOperationalCount adds v to the "operational_count" field. +func (u *ChannelMonitorDailyRollupUpsertBulk) AddOperationalCount(v int) *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.AddOperationalCount(v) + }) +} + +// UpdateOperationalCount sets the "operational_count" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsertBulk) UpdateOperationalCount() *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.UpdateOperationalCount() + }) +} + +// SetDegradedCount sets the "degraded_count" field. +func (u *ChannelMonitorDailyRollupUpsertBulk) SetDegradedCount(v int) *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.SetDegradedCount(v) + }) +} + +// AddDegradedCount adds v to the "degraded_count" field. 
+func (u *ChannelMonitorDailyRollupUpsertBulk) AddDegradedCount(v int) *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.AddDegradedCount(v) + }) +} + +// UpdateDegradedCount sets the "degraded_count" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsertBulk) UpdateDegradedCount() *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.UpdateDegradedCount() + }) +} + +// SetFailedCount sets the "failed_count" field. +func (u *ChannelMonitorDailyRollupUpsertBulk) SetFailedCount(v int) *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.SetFailedCount(v) + }) +} + +// AddFailedCount adds v to the "failed_count" field. +func (u *ChannelMonitorDailyRollupUpsertBulk) AddFailedCount(v int) *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.AddFailedCount(v) + }) +} + +// UpdateFailedCount sets the "failed_count" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsertBulk) UpdateFailedCount() *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.UpdateFailedCount() + }) +} + +// SetErrorCount sets the "error_count" field. +func (u *ChannelMonitorDailyRollupUpsertBulk) SetErrorCount(v int) *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.SetErrorCount(v) + }) +} + +// AddErrorCount adds v to the "error_count" field. +func (u *ChannelMonitorDailyRollupUpsertBulk) AddErrorCount(v int) *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.AddErrorCount(v) + }) +} + +// UpdateErrorCount sets the "error_count" field to the value that was provided on create. 
+func (u *ChannelMonitorDailyRollupUpsertBulk) UpdateErrorCount() *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.UpdateErrorCount() + }) +} + +// SetSumLatencyMs sets the "sum_latency_ms" field. +func (u *ChannelMonitorDailyRollupUpsertBulk) SetSumLatencyMs(v int64) *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.SetSumLatencyMs(v) + }) +} + +// AddSumLatencyMs adds v to the "sum_latency_ms" field. +func (u *ChannelMonitorDailyRollupUpsertBulk) AddSumLatencyMs(v int64) *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.AddSumLatencyMs(v) + }) +} + +// UpdateSumLatencyMs sets the "sum_latency_ms" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsertBulk) UpdateSumLatencyMs() *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.UpdateSumLatencyMs() + }) +} + +// SetCountLatency sets the "count_latency" field. +func (u *ChannelMonitorDailyRollupUpsertBulk) SetCountLatency(v int) *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.SetCountLatency(v) + }) +} + +// AddCountLatency adds v to the "count_latency" field. +func (u *ChannelMonitorDailyRollupUpsertBulk) AddCountLatency(v int) *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.AddCountLatency(v) + }) +} + +// UpdateCountLatency sets the "count_latency" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsertBulk) UpdateCountLatency() *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.UpdateCountLatency() + }) +} + +// SetSumPingLatencyMs sets the "sum_ping_latency_ms" field. 
+func (u *ChannelMonitorDailyRollupUpsertBulk) SetSumPingLatencyMs(v int64) *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.SetSumPingLatencyMs(v) + }) +} + +// AddSumPingLatencyMs adds v to the "sum_ping_latency_ms" field. +func (u *ChannelMonitorDailyRollupUpsertBulk) AddSumPingLatencyMs(v int64) *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.AddSumPingLatencyMs(v) + }) +} + +// UpdateSumPingLatencyMs sets the "sum_ping_latency_ms" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsertBulk) UpdateSumPingLatencyMs() *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.UpdateSumPingLatencyMs() + }) +} + +// SetCountPingLatency sets the "count_ping_latency" field. +func (u *ChannelMonitorDailyRollupUpsertBulk) SetCountPingLatency(v int) *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.SetCountPingLatency(v) + }) +} + +// AddCountPingLatency adds v to the "count_ping_latency" field. +func (u *ChannelMonitorDailyRollupUpsertBulk) AddCountPingLatency(v int) *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.AddCountPingLatency(v) + }) +} + +// UpdateCountPingLatency sets the "count_ping_latency" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsertBulk) UpdateCountPingLatency() *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.UpdateCountPingLatency() + }) +} + +// SetComputedAt sets the "computed_at" field. 
+func (u *ChannelMonitorDailyRollupUpsertBulk) SetComputedAt(v time.Time) *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.SetComputedAt(v) + }) +} + +// UpdateComputedAt sets the "computed_at" field to the value that was provided on create. +func (u *ChannelMonitorDailyRollupUpsertBulk) UpdateComputedAt() *ChannelMonitorDailyRollupUpsertBulk { + return u.Update(func(s *ChannelMonitorDailyRollupUpsert) { + s.UpdateComputedAt() + }) +} + +// Exec executes the query. +func (u *ChannelMonitorDailyRollupUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the ChannelMonitorDailyRollupCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for ChannelMonitorDailyRollupCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *ChannelMonitorDailyRollupUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/channelmonitordailyrollup_delete.go b/backend/ent/channelmonitordailyrollup_delete.go new file mode 100644 index 00000000..460c94f8 --- /dev/null +++ b/backend/ent/channelmonitordailyrollup_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ChannelMonitorDailyRollupDelete is the builder for deleting a ChannelMonitorDailyRollup entity. 
+type ChannelMonitorDailyRollupDelete struct { + config + hooks []Hook + mutation *ChannelMonitorDailyRollupMutation +} + +// Where appends a list predicates to the ChannelMonitorDailyRollupDelete builder. +func (_d *ChannelMonitorDailyRollupDelete) Where(ps ...predicate.ChannelMonitorDailyRollup) *ChannelMonitorDailyRollupDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *ChannelMonitorDailyRollupDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *ChannelMonitorDailyRollupDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *ChannelMonitorDailyRollupDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(channelmonitordailyrollup.Table, sqlgraph.NewFieldSpec(channelmonitordailyrollup.FieldID, field.TypeInt64)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// ChannelMonitorDailyRollupDeleteOne is the builder for deleting a single ChannelMonitorDailyRollup entity. +type ChannelMonitorDailyRollupDeleteOne struct { + _d *ChannelMonitorDailyRollupDelete +} + +// Where appends a list predicates to the ChannelMonitorDailyRollupDelete builder. +func (_d *ChannelMonitorDailyRollupDeleteOne) Where(ps ...predicate.ChannelMonitorDailyRollup) *ChannelMonitorDailyRollupDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. 
+func (_d *ChannelMonitorDailyRollupDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{channelmonitordailyrollup.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *ChannelMonitorDailyRollupDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/channelmonitordailyrollup_query.go b/backend/ent/channelmonitordailyrollup_query.go new file mode 100644 index 00000000..30528575 --- /dev/null +++ b/backend/ent/channelmonitordailyrollup_query.go @@ -0,0 +1,643 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/channelmonitor" + "github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ChannelMonitorDailyRollupQuery is the builder for querying ChannelMonitorDailyRollup entities. +type ChannelMonitorDailyRollupQuery struct { + config + ctx *QueryContext + order []channelmonitordailyrollup.OrderOption + inters []Interceptor + predicates []predicate.ChannelMonitorDailyRollup + withMonitor *ChannelMonitorQuery + modifiers []func(*sql.Selector) + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the ChannelMonitorDailyRollupQuery builder. +func (_q *ChannelMonitorDailyRollupQuery) Where(ps ...predicate.ChannelMonitorDailyRollup) *ChannelMonitorDailyRollupQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. 
+func (_q *ChannelMonitorDailyRollupQuery) Limit(limit int) *ChannelMonitorDailyRollupQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *ChannelMonitorDailyRollupQuery) Offset(offset int) *ChannelMonitorDailyRollupQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *ChannelMonitorDailyRollupQuery) Unique(unique bool) *ChannelMonitorDailyRollupQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *ChannelMonitorDailyRollupQuery) Order(o ...channelmonitordailyrollup.OrderOption) *ChannelMonitorDailyRollupQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryMonitor chains the current query on the "monitor" edge. +func (_q *ChannelMonitorDailyRollupQuery) QueryMonitor() *ChannelMonitorQuery { + query := (&ChannelMonitorClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(channelmonitordailyrollup.Table, channelmonitordailyrollup.FieldID, selector), + sqlgraph.To(channelmonitor.Table, channelmonitor.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, channelmonitordailyrollup.MonitorTable, channelmonitordailyrollup.MonitorColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first ChannelMonitorDailyRollup entity from the query. +// Returns a *NotFoundError when no ChannelMonitorDailyRollup was found. 
+func (_q *ChannelMonitorDailyRollupQuery) First(ctx context.Context) (*ChannelMonitorDailyRollup, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{channelmonitordailyrollup.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *ChannelMonitorDailyRollupQuery) FirstX(ctx context.Context) *ChannelMonitorDailyRollup { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first ChannelMonitorDailyRollup ID from the query. +// Returns a *NotFoundError when no ChannelMonitorDailyRollup ID was found. +func (_q *ChannelMonitorDailyRollupQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{channelmonitordailyrollup.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *ChannelMonitorDailyRollupQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single ChannelMonitorDailyRollup entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one ChannelMonitorDailyRollup entity is found. +// Returns a *NotFoundError when no ChannelMonitorDailyRollup entities are found. 
+func (_q *ChannelMonitorDailyRollupQuery) Only(ctx context.Context) (*ChannelMonitorDailyRollup, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{channelmonitordailyrollup.Label} + default: + return nil, &NotSingularError{channelmonitordailyrollup.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *ChannelMonitorDailyRollupQuery) OnlyX(ctx context.Context) *ChannelMonitorDailyRollup { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only ChannelMonitorDailyRollup ID in the query. +// Returns a *NotSingularError when more than one ChannelMonitorDailyRollup ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *ChannelMonitorDailyRollupQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{channelmonitordailyrollup.Label} + default: + err = &NotSingularError{channelmonitordailyrollup.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *ChannelMonitorDailyRollupQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of ChannelMonitorDailyRollups. 
+func (_q *ChannelMonitorDailyRollupQuery) All(ctx context.Context) ([]*ChannelMonitorDailyRollup, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*ChannelMonitorDailyRollup, *ChannelMonitorDailyRollupQuery]() + return withInterceptors[[]*ChannelMonitorDailyRollup](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *ChannelMonitorDailyRollupQuery) AllX(ctx context.Context) []*ChannelMonitorDailyRollup { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of ChannelMonitorDailyRollup IDs. +func (_q *ChannelMonitorDailyRollupQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(channelmonitordailyrollup.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *ChannelMonitorDailyRollupQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (_q *ChannelMonitorDailyRollupQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*ChannelMonitorDailyRollupQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *ChannelMonitorDailyRollupQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. 
+func (_q *ChannelMonitorDailyRollupQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *ChannelMonitorDailyRollupQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the ChannelMonitorDailyRollupQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *ChannelMonitorDailyRollupQuery) Clone() *ChannelMonitorDailyRollupQuery { + if _q == nil { + return nil + } + return &ChannelMonitorDailyRollupQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]channelmonitordailyrollup.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.ChannelMonitorDailyRollup{}, _q.predicates...), + withMonitor: _q.withMonitor.Clone(), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// WithMonitor tells the query-builder to eager-load the nodes that are connected to +// the "monitor" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *ChannelMonitorDailyRollupQuery) WithMonitor(opts ...func(*ChannelMonitorQuery)) *ChannelMonitorDailyRollupQuery { + query := (&ChannelMonitorClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withMonitor = query + return _q +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. 
+// +// Example: +// +// var v []struct { +// DeletedAt time.Time `json:"deleted_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.ChannelMonitorDailyRollup.Query(). +// GroupBy(channelmonitordailyrollup.FieldDeletedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *ChannelMonitorDailyRollupQuery) GroupBy(field string, fields ...string) *ChannelMonitorDailyRollupGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &ChannelMonitorDailyRollupGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = channelmonitordailyrollup.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// DeletedAt time.Time `json:"deleted_at,omitempty"` +// } +// +// client.ChannelMonitorDailyRollup.Query(). +// Select(channelmonitordailyrollup.FieldDeletedAt). +// Scan(ctx, &v) +func (_q *ChannelMonitorDailyRollupQuery) Select(fields ...string) *ChannelMonitorDailyRollupSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &ChannelMonitorDailyRollupSelect{ChannelMonitorDailyRollupQuery: _q} + sbuild.label = channelmonitordailyrollup.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a ChannelMonitorDailyRollupSelect configured with the given aggregations. +func (_q *ChannelMonitorDailyRollupQuery) Aggregate(fns ...AggregateFunc) *ChannelMonitorDailyRollupSelect { + return _q.Select().Aggregate(fns...) 
+} + +func (_q *ChannelMonitorDailyRollupQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !channelmonitordailyrollup.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *ChannelMonitorDailyRollupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*ChannelMonitorDailyRollup, error) { + var ( + nodes = []*ChannelMonitorDailyRollup{} + _spec = _q.querySpec() + loadedTypes = [1]bool{ + _q.withMonitor != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*ChannelMonitorDailyRollup).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &ChannelMonitorDailyRollup{config: _q.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := _q.withMonitor; query != nil { + if err := _q.loadMonitor(ctx, query, nodes, nil, + func(n *ChannelMonitorDailyRollup, e *ChannelMonitor) { n.Edges.Monitor = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (_q *ChannelMonitorDailyRollupQuery) loadMonitor(ctx context.Context, query *ChannelMonitorQuery, nodes []*ChannelMonitorDailyRollup, init func(*ChannelMonitorDailyRollup), assign func(*ChannelMonitorDailyRollup, 
*ChannelMonitor)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*ChannelMonitorDailyRollup) + for i := range nodes { + fk := nodes[i].MonitorID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(channelmonitor.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "monitor_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (_q *ChannelMonitorDailyRollupQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *ChannelMonitorDailyRollupQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(channelmonitordailyrollup.Table, channelmonitordailyrollup.Columns, sqlgraph.NewFieldSpec(channelmonitordailyrollup.FieldID, field.TypeInt64)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, channelmonitordailyrollup.FieldID) + for i := range fields { + if fields[i] != channelmonitordailyrollup.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + if _q.withMonitor != nil { + _spec.Node.AddColumnOnce(channelmonitordailyrollup.FieldMonitorID) + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) 
+ } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *ChannelMonitorDailyRollupQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(channelmonitordailyrollup.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = channelmonitordailyrollup.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, m := range _q.modifiers { + m(selector) + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (_q *ChannelMonitorDailyRollupQuery) ForUpdate(opts ...sql.LockOption) *ChannelMonitorDailyRollupQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. 
Other sessions can read the rows, but cannot modify them +// until your transaction commits. +func (_q *ChannelMonitorDailyRollupQuery) ForShare(opts ...sql.LockOption) *ChannelMonitorDailyRollupQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + +// ChannelMonitorDailyRollupGroupBy is the group-by builder for ChannelMonitorDailyRollup entities. +type ChannelMonitorDailyRollupGroupBy struct { + selector + build *ChannelMonitorDailyRollupQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *ChannelMonitorDailyRollupGroupBy) Aggregate(fns ...AggregateFunc) *ChannelMonitorDailyRollupGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. +func (_g *ChannelMonitorDailyRollupGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*ChannelMonitorDailyRollupQuery, *ChannelMonitorDailyRollupGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *ChannelMonitorDailyRollupGroupBy) sqlScan(ctx context.Context, root *ChannelMonitorDailyRollupQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) 
+ if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// ChannelMonitorDailyRollupSelect is the builder for selecting fields of ChannelMonitorDailyRollup entities. +type ChannelMonitorDailyRollupSelect struct { + *ChannelMonitorDailyRollupQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *ChannelMonitorDailyRollupSelect) Aggregate(fns ...AggregateFunc) *ChannelMonitorDailyRollupSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. +func (_s *ChannelMonitorDailyRollupSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*ChannelMonitorDailyRollupQuery, *ChannelMonitorDailyRollupSelect](ctx, _s.ChannelMonitorDailyRollupQuery, _s, _s.inters, v) +} + +func (_s *ChannelMonitorDailyRollupSelect) sqlScan(ctx context.Context, root *ChannelMonitorDailyRollupQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) 
+ } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/channelmonitordailyrollup_update.go b/backend/ent/channelmonitordailyrollup_update.go new file mode 100644 index 00000000..0b82f8bf --- /dev/null +++ b/backend/ent/channelmonitordailyrollup_update.go @@ -0,0 +1,1025 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/channelmonitor" + "github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ChannelMonitorDailyRollupUpdate is the builder for updating ChannelMonitorDailyRollup entities. +type ChannelMonitorDailyRollupUpdate struct { + config + hooks []Hook + mutation *ChannelMonitorDailyRollupMutation +} + +// Where appends a list predicates to the ChannelMonitorDailyRollupUpdate builder. +func (_u *ChannelMonitorDailyRollupUpdate) Where(ps ...predicate.ChannelMonitorDailyRollup) *ChannelMonitorDailyRollupUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetDeletedAt sets the "deleted_at" field. +func (_u *ChannelMonitorDailyRollupUpdate) SetDeletedAt(v time.Time) *ChannelMonitorDailyRollupUpdate { + _u.mutation.SetDeletedAt(v) + return _u +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_u *ChannelMonitorDailyRollupUpdate) SetNillableDeletedAt(v *time.Time) *ChannelMonitorDailyRollupUpdate { + if v != nil { + _u.SetDeletedAt(*v) + } + return _u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (_u *ChannelMonitorDailyRollupUpdate) ClearDeletedAt() *ChannelMonitorDailyRollupUpdate { + _u.mutation.ClearDeletedAt() + return _u +} + +// SetMonitorID sets the "monitor_id" field. 
+func (_u *ChannelMonitorDailyRollupUpdate) SetMonitorID(v int64) *ChannelMonitorDailyRollupUpdate { + _u.mutation.SetMonitorID(v) + return _u +} + +// SetNillableMonitorID sets the "monitor_id" field if the given value is not nil. +func (_u *ChannelMonitorDailyRollupUpdate) SetNillableMonitorID(v *int64) *ChannelMonitorDailyRollupUpdate { + if v != nil { + _u.SetMonitorID(*v) + } + return _u +} + +// SetModel sets the "model" field. +func (_u *ChannelMonitorDailyRollupUpdate) SetModel(v string) *ChannelMonitorDailyRollupUpdate { + _u.mutation.SetModel(v) + return _u +} + +// SetNillableModel sets the "model" field if the given value is not nil. +func (_u *ChannelMonitorDailyRollupUpdate) SetNillableModel(v *string) *ChannelMonitorDailyRollupUpdate { + if v != nil { + _u.SetModel(*v) + } + return _u +} + +// SetBucketDate sets the "bucket_date" field. +func (_u *ChannelMonitorDailyRollupUpdate) SetBucketDate(v time.Time) *ChannelMonitorDailyRollupUpdate { + _u.mutation.SetBucketDate(v) + return _u +} + +// SetNillableBucketDate sets the "bucket_date" field if the given value is not nil. +func (_u *ChannelMonitorDailyRollupUpdate) SetNillableBucketDate(v *time.Time) *ChannelMonitorDailyRollupUpdate { + if v != nil { + _u.SetBucketDate(*v) + } + return _u +} + +// SetTotalChecks sets the "total_checks" field. +func (_u *ChannelMonitorDailyRollupUpdate) SetTotalChecks(v int) *ChannelMonitorDailyRollupUpdate { + _u.mutation.ResetTotalChecks() + _u.mutation.SetTotalChecks(v) + return _u +} + +// SetNillableTotalChecks sets the "total_checks" field if the given value is not nil. +func (_u *ChannelMonitorDailyRollupUpdate) SetNillableTotalChecks(v *int) *ChannelMonitorDailyRollupUpdate { + if v != nil { + _u.SetTotalChecks(*v) + } + return _u +} + +// AddTotalChecks adds value to the "total_checks" field. 
+func (_u *ChannelMonitorDailyRollupUpdate) AddTotalChecks(v int) *ChannelMonitorDailyRollupUpdate { + _u.mutation.AddTotalChecks(v) + return _u +} + +// SetOkCount sets the "ok_count" field. +func (_u *ChannelMonitorDailyRollupUpdate) SetOkCount(v int) *ChannelMonitorDailyRollupUpdate { + _u.mutation.ResetOkCount() + _u.mutation.SetOkCount(v) + return _u +} + +// SetNillableOkCount sets the "ok_count" field if the given value is not nil. +func (_u *ChannelMonitorDailyRollupUpdate) SetNillableOkCount(v *int) *ChannelMonitorDailyRollupUpdate { + if v != nil { + _u.SetOkCount(*v) + } + return _u +} + +// AddOkCount adds value to the "ok_count" field. +func (_u *ChannelMonitorDailyRollupUpdate) AddOkCount(v int) *ChannelMonitorDailyRollupUpdate { + _u.mutation.AddOkCount(v) + return _u +} + +// SetOperationalCount sets the "operational_count" field. +func (_u *ChannelMonitorDailyRollupUpdate) SetOperationalCount(v int) *ChannelMonitorDailyRollupUpdate { + _u.mutation.ResetOperationalCount() + _u.mutation.SetOperationalCount(v) + return _u +} + +// SetNillableOperationalCount sets the "operational_count" field if the given value is not nil. +func (_u *ChannelMonitorDailyRollupUpdate) SetNillableOperationalCount(v *int) *ChannelMonitorDailyRollupUpdate { + if v != nil { + _u.SetOperationalCount(*v) + } + return _u +} + +// AddOperationalCount adds value to the "operational_count" field. +func (_u *ChannelMonitorDailyRollupUpdate) AddOperationalCount(v int) *ChannelMonitorDailyRollupUpdate { + _u.mutation.AddOperationalCount(v) + return _u +} + +// SetDegradedCount sets the "degraded_count" field. +func (_u *ChannelMonitorDailyRollupUpdate) SetDegradedCount(v int) *ChannelMonitorDailyRollupUpdate { + _u.mutation.ResetDegradedCount() + _u.mutation.SetDegradedCount(v) + return _u +} + +// SetNillableDegradedCount sets the "degraded_count" field if the given value is not nil. 
+func (_u *ChannelMonitorDailyRollupUpdate) SetNillableDegradedCount(v *int) *ChannelMonitorDailyRollupUpdate { + if v != nil { + _u.SetDegradedCount(*v) + } + return _u +} + +// AddDegradedCount adds value to the "degraded_count" field. +func (_u *ChannelMonitorDailyRollupUpdate) AddDegradedCount(v int) *ChannelMonitorDailyRollupUpdate { + _u.mutation.AddDegradedCount(v) + return _u +} + +// SetFailedCount sets the "failed_count" field. +func (_u *ChannelMonitorDailyRollupUpdate) SetFailedCount(v int) *ChannelMonitorDailyRollupUpdate { + _u.mutation.ResetFailedCount() + _u.mutation.SetFailedCount(v) + return _u +} + +// SetNillableFailedCount sets the "failed_count" field if the given value is not nil. +func (_u *ChannelMonitorDailyRollupUpdate) SetNillableFailedCount(v *int) *ChannelMonitorDailyRollupUpdate { + if v != nil { + _u.SetFailedCount(*v) + } + return _u +} + +// AddFailedCount adds value to the "failed_count" field. +func (_u *ChannelMonitorDailyRollupUpdate) AddFailedCount(v int) *ChannelMonitorDailyRollupUpdate { + _u.mutation.AddFailedCount(v) + return _u +} + +// SetErrorCount sets the "error_count" field. +func (_u *ChannelMonitorDailyRollupUpdate) SetErrorCount(v int) *ChannelMonitorDailyRollupUpdate { + _u.mutation.ResetErrorCount() + _u.mutation.SetErrorCount(v) + return _u +} + +// SetNillableErrorCount sets the "error_count" field if the given value is not nil. +func (_u *ChannelMonitorDailyRollupUpdate) SetNillableErrorCount(v *int) *ChannelMonitorDailyRollupUpdate { + if v != nil { + _u.SetErrorCount(*v) + } + return _u +} + +// AddErrorCount adds value to the "error_count" field. +func (_u *ChannelMonitorDailyRollupUpdate) AddErrorCount(v int) *ChannelMonitorDailyRollupUpdate { + _u.mutation.AddErrorCount(v) + return _u +} + +// SetSumLatencyMs sets the "sum_latency_ms" field. 
+func (_u *ChannelMonitorDailyRollupUpdate) SetSumLatencyMs(v int64) *ChannelMonitorDailyRollupUpdate { + _u.mutation.ResetSumLatencyMs() + _u.mutation.SetSumLatencyMs(v) + return _u +} + +// SetNillableSumLatencyMs sets the "sum_latency_ms" field if the given value is not nil. +func (_u *ChannelMonitorDailyRollupUpdate) SetNillableSumLatencyMs(v *int64) *ChannelMonitorDailyRollupUpdate { + if v != nil { + _u.SetSumLatencyMs(*v) + } + return _u +} + +// AddSumLatencyMs adds value to the "sum_latency_ms" field. +func (_u *ChannelMonitorDailyRollupUpdate) AddSumLatencyMs(v int64) *ChannelMonitorDailyRollupUpdate { + _u.mutation.AddSumLatencyMs(v) + return _u +} + +// SetCountLatency sets the "count_latency" field. +func (_u *ChannelMonitorDailyRollupUpdate) SetCountLatency(v int) *ChannelMonitorDailyRollupUpdate { + _u.mutation.ResetCountLatency() + _u.mutation.SetCountLatency(v) + return _u +} + +// SetNillableCountLatency sets the "count_latency" field if the given value is not nil. +func (_u *ChannelMonitorDailyRollupUpdate) SetNillableCountLatency(v *int) *ChannelMonitorDailyRollupUpdate { + if v != nil { + _u.SetCountLatency(*v) + } + return _u +} + +// AddCountLatency adds value to the "count_latency" field. +func (_u *ChannelMonitorDailyRollupUpdate) AddCountLatency(v int) *ChannelMonitorDailyRollupUpdate { + _u.mutation.AddCountLatency(v) + return _u +} + +// SetSumPingLatencyMs sets the "sum_ping_latency_ms" field. +func (_u *ChannelMonitorDailyRollupUpdate) SetSumPingLatencyMs(v int64) *ChannelMonitorDailyRollupUpdate { + _u.mutation.ResetSumPingLatencyMs() + _u.mutation.SetSumPingLatencyMs(v) + return _u +} + +// SetNillableSumPingLatencyMs sets the "sum_ping_latency_ms" field if the given value is not nil. +func (_u *ChannelMonitorDailyRollupUpdate) SetNillableSumPingLatencyMs(v *int64) *ChannelMonitorDailyRollupUpdate { + if v != nil { + _u.SetSumPingLatencyMs(*v) + } + return _u +} + +// AddSumPingLatencyMs adds value to the "sum_ping_latency_ms" field. 
+func (_u *ChannelMonitorDailyRollupUpdate) AddSumPingLatencyMs(v int64) *ChannelMonitorDailyRollupUpdate { + _u.mutation.AddSumPingLatencyMs(v) + return _u +} + +// SetCountPingLatency sets the "count_ping_latency" field. +func (_u *ChannelMonitorDailyRollupUpdate) SetCountPingLatency(v int) *ChannelMonitorDailyRollupUpdate { + _u.mutation.ResetCountPingLatency() + _u.mutation.SetCountPingLatency(v) + return _u +} + +// SetNillableCountPingLatency sets the "count_ping_latency" field if the given value is not nil. +func (_u *ChannelMonitorDailyRollupUpdate) SetNillableCountPingLatency(v *int) *ChannelMonitorDailyRollupUpdate { + if v != nil { + _u.SetCountPingLatency(*v) + } + return _u +} + +// AddCountPingLatency adds value to the "count_ping_latency" field. +func (_u *ChannelMonitorDailyRollupUpdate) AddCountPingLatency(v int) *ChannelMonitorDailyRollupUpdate { + _u.mutation.AddCountPingLatency(v) + return _u +} + +// SetComputedAt sets the "computed_at" field. +func (_u *ChannelMonitorDailyRollupUpdate) SetComputedAt(v time.Time) *ChannelMonitorDailyRollupUpdate { + _u.mutation.SetComputedAt(v) + return _u +} + +// SetMonitor sets the "monitor" edge to the ChannelMonitor entity. +func (_u *ChannelMonitorDailyRollupUpdate) SetMonitor(v *ChannelMonitor) *ChannelMonitorDailyRollupUpdate { + return _u.SetMonitorID(v.ID) +} + +// Mutation returns the ChannelMonitorDailyRollupMutation object of the builder. +func (_u *ChannelMonitorDailyRollupUpdate) Mutation() *ChannelMonitorDailyRollupMutation { + return _u.mutation +} + +// ClearMonitor clears the "monitor" edge to the ChannelMonitor entity. +func (_u *ChannelMonitorDailyRollupUpdate) ClearMonitor() *ChannelMonitorDailyRollupUpdate { + _u.mutation.ClearMonitor() + return _u +} + +// Save executes the query and returns the number of nodes affected by the update operation. 
+func (_u *ChannelMonitorDailyRollupUpdate) Save(ctx context.Context) (int, error) { + if err := _u.defaults(); err != nil { + return 0, err + } + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *ChannelMonitorDailyRollupUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *ChannelMonitorDailyRollupUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *ChannelMonitorDailyRollupUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *ChannelMonitorDailyRollupUpdate) defaults() error { + if _, ok := _u.mutation.ComputedAt(); !ok { + if channelmonitordailyrollup.UpdateDefaultComputedAt == nil { + return fmt.Errorf("ent: uninitialized channelmonitordailyrollup.UpdateDefaultComputedAt (forgotten import ent/runtime?)") + } + v := channelmonitordailyrollup.UpdateDefaultComputedAt() + _u.mutation.SetComputedAt(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. 
+func (_u *ChannelMonitorDailyRollupUpdate) check() error { + if v, ok := _u.mutation.Model(); ok { + if err := channelmonitordailyrollup.ModelValidator(v); err != nil { + return &ValidationError{Name: "model", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorDailyRollup.model": %w`, err)} + } + } + if _u.mutation.MonitorCleared() && len(_u.mutation.MonitorIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "ChannelMonitorDailyRollup.monitor"`) + } + return nil +} + +func (_u *ChannelMonitorDailyRollupUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(channelmonitordailyrollup.Table, channelmonitordailyrollup.Columns, sqlgraph.NewFieldSpec(channelmonitordailyrollup.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.DeletedAt(); ok { + _spec.SetField(channelmonitordailyrollup.FieldDeletedAt, field.TypeTime, value) + } + if _u.mutation.DeletedAtCleared() { + _spec.ClearField(channelmonitordailyrollup.FieldDeletedAt, field.TypeTime) + } + if value, ok := _u.mutation.Model(); ok { + _spec.SetField(channelmonitordailyrollup.FieldModel, field.TypeString, value) + } + if value, ok := _u.mutation.BucketDate(); ok { + _spec.SetField(channelmonitordailyrollup.FieldBucketDate, field.TypeTime, value) + } + if value, ok := _u.mutation.TotalChecks(); ok { + _spec.SetField(channelmonitordailyrollup.FieldTotalChecks, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedTotalChecks(); ok { + _spec.AddField(channelmonitordailyrollup.FieldTotalChecks, field.TypeInt, value) + } + if value, ok := _u.mutation.OkCount(); ok { + _spec.SetField(channelmonitordailyrollup.FieldOkCount, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedOkCount(); ok { + 
_spec.AddField(channelmonitordailyrollup.FieldOkCount, field.TypeInt, value) + } + if value, ok := _u.mutation.OperationalCount(); ok { + _spec.SetField(channelmonitordailyrollup.FieldOperationalCount, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedOperationalCount(); ok { + _spec.AddField(channelmonitordailyrollup.FieldOperationalCount, field.TypeInt, value) + } + if value, ok := _u.mutation.DegradedCount(); ok { + _spec.SetField(channelmonitordailyrollup.FieldDegradedCount, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedDegradedCount(); ok { + _spec.AddField(channelmonitordailyrollup.FieldDegradedCount, field.TypeInt, value) + } + if value, ok := _u.mutation.FailedCount(); ok { + _spec.SetField(channelmonitordailyrollup.FieldFailedCount, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedFailedCount(); ok { + _spec.AddField(channelmonitordailyrollup.FieldFailedCount, field.TypeInt, value) + } + if value, ok := _u.mutation.ErrorCount(); ok { + _spec.SetField(channelmonitordailyrollup.FieldErrorCount, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedErrorCount(); ok { + _spec.AddField(channelmonitordailyrollup.FieldErrorCount, field.TypeInt, value) + } + if value, ok := _u.mutation.SumLatencyMs(); ok { + _spec.SetField(channelmonitordailyrollup.FieldSumLatencyMs, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedSumLatencyMs(); ok { + _spec.AddField(channelmonitordailyrollup.FieldSumLatencyMs, field.TypeInt64, value) + } + if value, ok := _u.mutation.CountLatency(); ok { + _spec.SetField(channelmonitordailyrollup.FieldCountLatency, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedCountLatency(); ok { + _spec.AddField(channelmonitordailyrollup.FieldCountLatency, field.TypeInt, value) + } + if value, ok := _u.mutation.SumPingLatencyMs(); ok { + _spec.SetField(channelmonitordailyrollup.FieldSumPingLatencyMs, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedSumPingLatencyMs(); ok { 
+ _spec.AddField(channelmonitordailyrollup.FieldSumPingLatencyMs, field.TypeInt64, value) + } + if value, ok := _u.mutation.CountPingLatency(); ok { + _spec.SetField(channelmonitordailyrollup.FieldCountPingLatency, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedCountPingLatency(); ok { + _spec.AddField(channelmonitordailyrollup.FieldCountPingLatency, field.TypeInt, value) + } + if value, ok := _u.mutation.ComputedAt(); ok { + _spec.SetField(channelmonitordailyrollup.FieldComputedAt, field.TypeTime, value) + } + if _u.mutation.MonitorCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: channelmonitordailyrollup.MonitorTable, + Columns: []string{channelmonitordailyrollup.MonitorColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.MonitorIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: channelmonitordailyrollup.MonitorTable, + Columns: []string{channelmonitordailyrollup.MonitorColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{channelmonitordailyrollup.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// ChannelMonitorDailyRollupUpdateOne is the builder for updating a single ChannelMonitorDailyRollup entity. 
+type ChannelMonitorDailyRollupUpdateOne struct { + config + fields []string + hooks []Hook + mutation *ChannelMonitorDailyRollupMutation +} + +// SetDeletedAt sets the "deleted_at" field. +func (_u *ChannelMonitorDailyRollupUpdateOne) SetDeletedAt(v time.Time) *ChannelMonitorDailyRollupUpdateOne { + _u.mutation.SetDeletedAt(v) + return _u +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableDeletedAt(v *time.Time) *ChannelMonitorDailyRollupUpdateOne { + if v != nil { + _u.SetDeletedAt(*v) + } + return _u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (_u *ChannelMonitorDailyRollupUpdateOne) ClearDeletedAt() *ChannelMonitorDailyRollupUpdateOne { + _u.mutation.ClearDeletedAt() + return _u +} + +// SetMonitorID sets the "monitor_id" field. +func (_u *ChannelMonitorDailyRollupUpdateOne) SetMonitorID(v int64) *ChannelMonitorDailyRollupUpdateOne { + _u.mutation.SetMonitorID(v) + return _u +} + +// SetNillableMonitorID sets the "monitor_id" field if the given value is not nil. +func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableMonitorID(v *int64) *ChannelMonitorDailyRollupUpdateOne { + if v != nil { + _u.SetMonitorID(*v) + } + return _u +} + +// SetModel sets the "model" field. +func (_u *ChannelMonitorDailyRollupUpdateOne) SetModel(v string) *ChannelMonitorDailyRollupUpdateOne { + _u.mutation.SetModel(v) + return _u +} + +// SetNillableModel sets the "model" field if the given value is not nil. +func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableModel(v *string) *ChannelMonitorDailyRollupUpdateOne { + if v != nil { + _u.SetModel(*v) + } + return _u +} + +// SetBucketDate sets the "bucket_date" field. 
+func (_u *ChannelMonitorDailyRollupUpdateOne) SetBucketDate(v time.Time) *ChannelMonitorDailyRollupUpdateOne { + _u.mutation.SetBucketDate(v) + return _u +} + +// SetNillableBucketDate sets the "bucket_date" field if the given value is not nil. +func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableBucketDate(v *time.Time) *ChannelMonitorDailyRollupUpdateOne { + if v != nil { + _u.SetBucketDate(*v) + } + return _u +} + +// SetTotalChecks sets the "total_checks" field. +func (_u *ChannelMonitorDailyRollupUpdateOne) SetTotalChecks(v int) *ChannelMonitorDailyRollupUpdateOne { + _u.mutation.ResetTotalChecks() + _u.mutation.SetTotalChecks(v) + return _u +} + +// SetNillableTotalChecks sets the "total_checks" field if the given value is not nil. +func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableTotalChecks(v *int) *ChannelMonitorDailyRollupUpdateOne { + if v != nil { + _u.SetTotalChecks(*v) + } + return _u +} + +// AddTotalChecks adds value to the "total_checks" field. +func (_u *ChannelMonitorDailyRollupUpdateOne) AddTotalChecks(v int) *ChannelMonitorDailyRollupUpdateOne { + _u.mutation.AddTotalChecks(v) + return _u +} + +// SetOkCount sets the "ok_count" field. +func (_u *ChannelMonitorDailyRollupUpdateOne) SetOkCount(v int) *ChannelMonitorDailyRollupUpdateOne { + _u.mutation.ResetOkCount() + _u.mutation.SetOkCount(v) + return _u +} + +// SetNillableOkCount sets the "ok_count" field if the given value is not nil. +func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableOkCount(v *int) *ChannelMonitorDailyRollupUpdateOne { + if v != nil { + _u.SetOkCount(*v) + } + return _u +} + +// AddOkCount adds value to the "ok_count" field. +func (_u *ChannelMonitorDailyRollupUpdateOne) AddOkCount(v int) *ChannelMonitorDailyRollupUpdateOne { + _u.mutation.AddOkCount(v) + return _u +} + +// SetOperationalCount sets the "operational_count" field. 
+func (_u *ChannelMonitorDailyRollupUpdateOne) SetOperationalCount(v int) *ChannelMonitorDailyRollupUpdateOne { + _u.mutation.ResetOperationalCount() + _u.mutation.SetOperationalCount(v) + return _u +} + +// SetNillableOperationalCount sets the "operational_count" field if the given value is not nil. +func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableOperationalCount(v *int) *ChannelMonitorDailyRollupUpdateOne { + if v != nil { + _u.SetOperationalCount(*v) + } + return _u +} + +// AddOperationalCount adds value to the "operational_count" field. +func (_u *ChannelMonitorDailyRollupUpdateOne) AddOperationalCount(v int) *ChannelMonitorDailyRollupUpdateOne { + _u.mutation.AddOperationalCount(v) + return _u +} + +// SetDegradedCount sets the "degraded_count" field. +func (_u *ChannelMonitorDailyRollupUpdateOne) SetDegradedCount(v int) *ChannelMonitorDailyRollupUpdateOne { + _u.mutation.ResetDegradedCount() + _u.mutation.SetDegradedCount(v) + return _u +} + +// SetNillableDegradedCount sets the "degraded_count" field if the given value is not nil. +func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableDegradedCount(v *int) *ChannelMonitorDailyRollupUpdateOne { + if v != nil { + _u.SetDegradedCount(*v) + } + return _u +} + +// AddDegradedCount adds value to the "degraded_count" field. +func (_u *ChannelMonitorDailyRollupUpdateOne) AddDegradedCount(v int) *ChannelMonitorDailyRollupUpdateOne { + _u.mutation.AddDegradedCount(v) + return _u +} + +// SetFailedCount sets the "failed_count" field. +func (_u *ChannelMonitorDailyRollupUpdateOne) SetFailedCount(v int) *ChannelMonitorDailyRollupUpdateOne { + _u.mutation.ResetFailedCount() + _u.mutation.SetFailedCount(v) + return _u +} + +// SetNillableFailedCount sets the "failed_count" field if the given value is not nil. 
+func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableFailedCount(v *int) *ChannelMonitorDailyRollupUpdateOne { + if v != nil { + _u.SetFailedCount(*v) + } + return _u +} + +// AddFailedCount adds value to the "failed_count" field. +func (_u *ChannelMonitorDailyRollupUpdateOne) AddFailedCount(v int) *ChannelMonitorDailyRollupUpdateOne { + _u.mutation.AddFailedCount(v) + return _u +} + +// SetErrorCount sets the "error_count" field. +func (_u *ChannelMonitorDailyRollupUpdateOne) SetErrorCount(v int) *ChannelMonitorDailyRollupUpdateOne { + _u.mutation.ResetErrorCount() + _u.mutation.SetErrorCount(v) + return _u +} + +// SetNillableErrorCount sets the "error_count" field if the given value is not nil. +func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableErrorCount(v *int) *ChannelMonitorDailyRollupUpdateOne { + if v != nil { + _u.SetErrorCount(*v) + } + return _u +} + +// AddErrorCount adds value to the "error_count" field. +func (_u *ChannelMonitorDailyRollupUpdateOne) AddErrorCount(v int) *ChannelMonitorDailyRollupUpdateOne { + _u.mutation.AddErrorCount(v) + return _u +} + +// SetSumLatencyMs sets the "sum_latency_ms" field. +func (_u *ChannelMonitorDailyRollupUpdateOne) SetSumLatencyMs(v int64) *ChannelMonitorDailyRollupUpdateOne { + _u.mutation.ResetSumLatencyMs() + _u.mutation.SetSumLatencyMs(v) + return _u +} + +// SetNillableSumLatencyMs sets the "sum_latency_ms" field if the given value is not nil. +func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableSumLatencyMs(v *int64) *ChannelMonitorDailyRollupUpdateOne { + if v != nil { + _u.SetSumLatencyMs(*v) + } + return _u +} + +// AddSumLatencyMs adds value to the "sum_latency_ms" field. +func (_u *ChannelMonitorDailyRollupUpdateOne) AddSumLatencyMs(v int64) *ChannelMonitorDailyRollupUpdateOne { + _u.mutation.AddSumLatencyMs(v) + return _u +} + +// SetCountLatency sets the "count_latency" field. 
+func (_u *ChannelMonitorDailyRollupUpdateOne) SetCountLatency(v int) *ChannelMonitorDailyRollupUpdateOne { + _u.mutation.ResetCountLatency() + _u.mutation.SetCountLatency(v) + return _u +} + +// SetNillableCountLatency sets the "count_latency" field if the given value is not nil. +func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableCountLatency(v *int) *ChannelMonitorDailyRollupUpdateOne { + if v != nil { + _u.SetCountLatency(*v) + } + return _u +} + +// AddCountLatency adds value to the "count_latency" field. +func (_u *ChannelMonitorDailyRollupUpdateOne) AddCountLatency(v int) *ChannelMonitorDailyRollupUpdateOne { + _u.mutation.AddCountLatency(v) + return _u +} + +// SetSumPingLatencyMs sets the "sum_ping_latency_ms" field. +func (_u *ChannelMonitorDailyRollupUpdateOne) SetSumPingLatencyMs(v int64) *ChannelMonitorDailyRollupUpdateOne { + _u.mutation.ResetSumPingLatencyMs() + _u.mutation.SetSumPingLatencyMs(v) + return _u +} + +// SetNillableSumPingLatencyMs sets the "sum_ping_latency_ms" field if the given value is not nil. +func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableSumPingLatencyMs(v *int64) *ChannelMonitorDailyRollupUpdateOne { + if v != nil { + _u.SetSumPingLatencyMs(*v) + } + return _u +} + +// AddSumPingLatencyMs adds value to the "sum_ping_latency_ms" field. +func (_u *ChannelMonitorDailyRollupUpdateOne) AddSumPingLatencyMs(v int64) *ChannelMonitorDailyRollupUpdateOne { + _u.mutation.AddSumPingLatencyMs(v) + return _u +} + +// SetCountPingLatency sets the "count_ping_latency" field. +func (_u *ChannelMonitorDailyRollupUpdateOne) SetCountPingLatency(v int) *ChannelMonitorDailyRollupUpdateOne { + _u.mutation.ResetCountPingLatency() + _u.mutation.SetCountPingLatency(v) + return _u +} + +// SetNillableCountPingLatency sets the "count_ping_latency" field if the given value is not nil. 
+func (_u *ChannelMonitorDailyRollupUpdateOne) SetNillableCountPingLatency(v *int) *ChannelMonitorDailyRollupUpdateOne { + if v != nil { + _u.SetCountPingLatency(*v) + } + return _u +} + +// AddCountPingLatency adds value to the "count_ping_latency" field. +func (_u *ChannelMonitorDailyRollupUpdateOne) AddCountPingLatency(v int) *ChannelMonitorDailyRollupUpdateOne { + _u.mutation.AddCountPingLatency(v) + return _u +} + +// SetComputedAt sets the "computed_at" field. +func (_u *ChannelMonitorDailyRollupUpdateOne) SetComputedAt(v time.Time) *ChannelMonitorDailyRollupUpdateOne { + _u.mutation.SetComputedAt(v) + return _u +} + +// SetMonitor sets the "monitor" edge to the ChannelMonitor entity. +func (_u *ChannelMonitorDailyRollupUpdateOne) SetMonitor(v *ChannelMonitor) *ChannelMonitorDailyRollupUpdateOne { + return _u.SetMonitorID(v.ID) +} + +// Mutation returns the ChannelMonitorDailyRollupMutation object of the builder. +func (_u *ChannelMonitorDailyRollupUpdateOne) Mutation() *ChannelMonitorDailyRollupMutation { + return _u.mutation +} + +// ClearMonitor clears the "monitor" edge to the ChannelMonitor entity. +func (_u *ChannelMonitorDailyRollupUpdateOne) ClearMonitor() *ChannelMonitorDailyRollupUpdateOne { + _u.mutation.ClearMonitor() + return _u +} + +// Where appends a list predicates to the ChannelMonitorDailyRollupUpdate builder. +func (_u *ChannelMonitorDailyRollupUpdateOne) Where(ps ...predicate.ChannelMonitorDailyRollup) *ChannelMonitorDailyRollupUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *ChannelMonitorDailyRollupUpdateOne) Select(field string, fields ...string) *ChannelMonitorDailyRollupUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated ChannelMonitorDailyRollup entity. 
+func (_u *ChannelMonitorDailyRollupUpdateOne) Save(ctx context.Context) (*ChannelMonitorDailyRollup, error) { + if err := _u.defaults(); err != nil { + return nil, err + } + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *ChannelMonitorDailyRollupUpdateOne) SaveX(ctx context.Context) *ChannelMonitorDailyRollup { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *ChannelMonitorDailyRollupUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *ChannelMonitorDailyRollupUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *ChannelMonitorDailyRollupUpdateOne) defaults() error { + if _, ok := _u.mutation.ComputedAt(); !ok { + if channelmonitordailyrollup.UpdateDefaultComputedAt == nil { + return fmt.Errorf("ent: uninitialized channelmonitordailyrollup.UpdateDefaultComputedAt (forgotten import ent/runtime?)") + } + v := channelmonitordailyrollup.UpdateDefaultComputedAt() + _u.mutation.SetComputedAt(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. 
+func (_u *ChannelMonitorDailyRollupUpdateOne) check() error { + if v, ok := _u.mutation.Model(); ok { + if err := channelmonitordailyrollup.ModelValidator(v); err != nil { + return &ValidationError{Name: "model", err: fmt.Errorf(`ent: validator failed for field "ChannelMonitorDailyRollup.model": %w`, err)} + } + } + if _u.mutation.MonitorCleared() && len(_u.mutation.MonitorIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "ChannelMonitorDailyRollup.monitor"`) + } + return nil +} + +func (_u *ChannelMonitorDailyRollupUpdateOne) sqlSave(ctx context.Context) (_node *ChannelMonitorDailyRollup, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(channelmonitordailyrollup.Table, channelmonitordailyrollup.Columns, sqlgraph.NewFieldSpec(channelmonitordailyrollup.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "ChannelMonitorDailyRollup.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, channelmonitordailyrollup.FieldID) + for _, f := range fields { + if !channelmonitordailyrollup.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != channelmonitordailyrollup.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.DeletedAt(); ok { + _spec.SetField(channelmonitordailyrollup.FieldDeletedAt, field.TypeTime, value) + } + if _u.mutation.DeletedAtCleared() { + _spec.ClearField(channelmonitordailyrollup.FieldDeletedAt, field.TypeTime) + } + if value, ok := _u.mutation.Model(); ok { + 
_spec.SetField(channelmonitordailyrollup.FieldModel, field.TypeString, value) + } + if value, ok := _u.mutation.BucketDate(); ok { + _spec.SetField(channelmonitordailyrollup.FieldBucketDate, field.TypeTime, value) + } + if value, ok := _u.mutation.TotalChecks(); ok { + _spec.SetField(channelmonitordailyrollup.FieldTotalChecks, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedTotalChecks(); ok { + _spec.AddField(channelmonitordailyrollup.FieldTotalChecks, field.TypeInt, value) + } + if value, ok := _u.mutation.OkCount(); ok { + _spec.SetField(channelmonitordailyrollup.FieldOkCount, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedOkCount(); ok { + _spec.AddField(channelmonitordailyrollup.FieldOkCount, field.TypeInt, value) + } + if value, ok := _u.mutation.OperationalCount(); ok { + _spec.SetField(channelmonitordailyrollup.FieldOperationalCount, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedOperationalCount(); ok { + _spec.AddField(channelmonitordailyrollup.FieldOperationalCount, field.TypeInt, value) + } + if value, ok := _u.mutation.DegradedCount(); ok { + _spec.SetField(channelmonitordailyrollup.FieldDegradedCount, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedDegradedCount(); ok { + _spec.AddField(channelmonitordailyrollup.FieldDegradedCount, field.TypeInt, value) + } + if value, ok := _u.mutation.FailedCount(); ok { + _spec.SetField(channelmonitordailyrollup.FieldFailedCount, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedFailedCount(); ok { + _spec.AddField(channelmonitordailyrollup.FieldFailedCount, field.TypeInt, value) + } + if value, ok := _u.mutation.ErrorCount(); ok { + _spec.SetField(channelmonitordailyrollup.FieldErrorCount, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedErrorCount(); ok { + _spec.AddField(channelmonitordailyrollup.FieldErrorCount, field.TypeInt, value) + } + if value, ok := _u.mutation.SumLatencyMs(); ok { + 
_spec.SetField(channelmonitordailyrollup.FieldSumLatencyMs, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedSumLatencyMs(); ok { + _spec.AddField(channelmonitordailyrollup.FieldSumLatencyMs, field.TypeInt64, value) + } + if value, ok := _u.mutation.CountLatency(); ok { + _spec.SetField(channelmonitordailyrollup.FieldCountLatency, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedCountLatency(); ok { + _spec.AddField(channelmonitordailyrollup.FieldCountLatency, field.TypeInt, value) + } + if value, ok := _u.mutation.SumPingLatencyMs(); ok { + _spec.SetField(channelmonitordailyrollup.FieldSumPingLatencyMs, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedSumPingLatencyMs(); ok { + _spec.AddField(channelmonitordailyrollup.FieldSumPingLatencyMs, field.TypeInt64, value) + } + if value, ok := _u.mutation.CountPingLatency(); ok { + _spec.SetField(channelmonitordailyrollup.FieldCountPingLatency, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedCountPingLatency(); ok { + _spec.AddField(channelmonitordailyrollup.FieldCountPingLatency, field.TypeInt, value) + } + if value, ok := _u.mutation.ComputedAt(); ok { + _spec.SetField(channelmonitordailyrollup.FieldComputedAt, field.TypeTime, value) + } + if _u.mutation.MonitorCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: channelmonitordailyrollup.MonitorTable, + Columns: []string{channelmonitordailyrollup.MonitorColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.MonitorIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: channelmonitordailyrollup.MonitorTable, + Columns: []string{channelmonitordailyrollup.MonitorColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(channelmonitor.FieldID, 
field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &ChannelMonitorDailyRollup{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{channelmonitordailyrollup.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/ent/channelmonitorhistory.go b/backend/ent/channelmonitorhistory.go index 70dde542..256eaf5f 100644 --- a/backend/ent/channelmonitorhistory.go +++ b/backend/ent/channelmonitorhistory.go @@ -18,6 +18,8 @@ type ChannelMonitorHistory struct { config `json:"-"` // ID of the ent. ID int64 `json:"id,omitempty"` + // DeletedAt holds the value of the "deleted_at" field. + DeletedAt *time.Time `json:"deleted_at,omitempty"` // MonitorID holds the value of the "monitor_id" field. MonitorID int64 `json:"monitor_id,omitempty"` // Model holds the value of the "model" field. 
@@ -67,7 +69,7 @@ func (*ChannelMonitorHistory) scanValues(columns []string) ([]any, error) { values[i] = new(sql.NullInt64) case channelmonitorhistory.FieldModel, channelmonitorhistory.FieldStatus, channelmonitorhistory.FieldMessage: values[i] = new(sql.NullString) - case channelmonitorhistory.FieldCheckedAt: + case channelmonitorhistory.FieldDeletedAt, channelmonitorhistory.FieldCheckedAt: values[i] = new(sql.NullTime) default: values[i] = new(sql.UnknownType) @@ -90,6 +92,13 @@ func (_m *ChannelMonitorHistory) assignValues(columns []string, values []any) er return fmt.Errorf("unexpected type %T for field id", value) } _m.ID = int64(value.Int64) + case channelmonitorhistory.FieldDeletedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field deleted_at", values[i]) + } else if value.Valid { + _m.DeletedAt = new(time.Time) + *_m.DeletedAt = value.Time + } case channelmonitorhistory.FieldMonitorID: if value, ok := values[i].(*sql.NullInt64); !ok { return fmt.Errorf("unexpected type %T for field monitor_id", values[i]) @@ -175,6 +184,11 @@ func (_m *ChannelMonitorHistory) String() string { var builder strings.Builder builder.WriteString("ChannelMonitorHistory(") builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + if v := _m.DeletedAt; v != nil { + builder.WriteString("deleted_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") builder.WriteString("monitor_id=") builder.WriteString(fmt.Sprintf("%v", _m.MonitorID)) builder.WriteString(", ") diff --git a/backend/ent/channelmonitorhistory/channelmonitorhistory.go b/backend/ent/channelmonitorhistory/channelmonitorhistory.go index 6a9dc006..da59791b 100644 --- a/backend/ent/channelmonitorhistory/channelmonitorhistory.go +++ b/backend/ent/channelmonitorhistory/channelmonitorhistory.go @@ -6,6 +6,7 @@ import ( "fmt" "time" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" ) @@ -15,6 +16,8 @@ const ( Label = 
"channel_monitor_history" // FieldID holds the string denoting the id field in the database. FieldID = "id" + // FieldDeletedAt holds the string denoting the deleted_at field in the database. + FieldDeletedAt = "deleted_at" // FieldMonitorID holds the string denoting the monitor_id field in the database. FieldMonitorID = "monitor_id" // FieldModel holds the string denoting the model field in the database. @@ -45,6 +48,7 @@ const ( // Columns holds all SQL columns for channelmonitorhistory fields. var Columns = []string{ FieldID, + FieldDeletedAt, FieldMonitorID, FieldModel, FieldStatus, @@ -64,7 +68,14 @@ func ValidColumn(column string) bool { return false } +// Note that the variables below are initialized by the runtime +// package on the initialization of the application. Therefore, +// it should be imported in the main as follows: +// +// import _ "github.com/Wei-Shaw/sub2api/ent/runtime" var ( + Hooks [1]ent.Hook + Interceptors [1]ent.Interceptor // ModelValidator is a validator for the "model" field. It is called by the builders before save. ModelValidator func(string) error // DefaultMessage holds the default value on creation for the "message" field. @@ -108,6 +119,11 @@ func ByID(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldID, opts...).ToFunc() } +// ByDeletedAt orders the results by the deleted_at field. +func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDeletedAt, opts...).ToFunc() +} + // ByMonitorID orders the results by the monitor_id field. 
func ByMonitorID(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldMonitorID, opts...).ToFunc() diff --git a/backend/ent/channelmonitorhistory/where.go b/backend/ent/channelmonitorhistory/where.go index afa73f35..7b1cd50d 100644 --- a/backend/ent/channelmonitorhistory/where.go +++ b/backend/ent/channelmonitorhistory/where.go @@ -55,6 +55,11 @@ func IDLTE(id int64) predicate.ChannelMonitorHistory { return predicate.ChannelMonitorHistory(sql.FieldLTE(FieldID, id)) } +// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ. +func DeletedAt(v time.Time) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldDeletedAt, v)) +} + // MonitorID applies equality check predicate on the "monitor_id" field. It's identical to MonitorIDEQ. func MonitorID(v int64) predicate.ChannelMonitorHistory { return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldMonitorID, v)) @@ -85,6 +90,56 @@ func CheckedAt(v time.Time) predicate.ChannelMonitorHistory { return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldCheckedAt, v)) } +// DeletedAtEQ applies the EQ predicate on the "deleted_at" field. +func DeletedAtEQ(v time.Time) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldDeletedAt, v)) +} + +// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field. +func DeletedAtNEQ(v time.Time) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldNEQ(FieldDeletedAt, v)) +} + +// DeletedAtIn applies the In predicate on the "deleted_at" field. +func DeletedAtIn(vs ...time.Time) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldIn(FieldDeletedAt, vs...)) +} + +// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field. 
+func DeletedAtNotIn(vs ...time.Time) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldNotIn(FieldDeletedAt, vs...)) +} + +// DeletedAtGT applies the GT predicate on the "deleted_at" field. +func DeletedAtGT(v time.Time) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldGT(FieldDeletedAt, v)) +} + +// DeletedAtGTE applies the GTE predicate on the "deleted_at" field. +func DeletedAtGTE(v time.Time) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldGTE(FieldDeletedAt, v)) +} + +// DeletedAtLT applies the LT predicate on the "deleted_at" field. +func DeletedAtLT(v time.Time) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldLT(FieldDeletedAt, v)) +} + +// DeletedAtLTE applies the LTE predicate on the "deleted_at" field. +func DeletedAtLTE(v time.Time) predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldLTE(FieldDeletedAt, v)) +} + +// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field. +func DeletedAtIsNil() predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldIsNull(FieldDeletedAt)) +} + +// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field. +func DeletedAtNotNil() predicate.ChannelMonitorHistory { + return predicate.ChannelMonitorHistory(sql.FieldNotNull(FieldDeletedAt)) +} + // MonitorIDEQ applies the EQ predicate on the "monitor_id" field. 
func MonitorIDEQ(v int64) predicate.ChannelMonitorHistory { return predicate.ChannelMonitorHistory(sql.FieldEQ(FieldMonitorID, v)) diff --git a/backend/ent/channelmonitorhistory_create.go b/backend/ent/channelmonitorhistory_create.go index 71034865..9a68c9ce 100644 --- a/backend/ent/channelmonitorhistory_create.go +++ b/backend/ent/channelmonitorhistory_create.go @@ -23,6 +23,20 @@ type ChannelMonitorHistoryCreate struct { conflict []sql.ConflictOption } +// SetDeletedAt sets the "deleted_at" field. +func (_c *ChannelMonitorHistoryCreate) SetDeletedAt(v time.Time) *ChannelMonitorHistoryCreate { + _c.mutation.SetDeletedAt(v) + return _c +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_c *ChannelMonitorHistoryCreate) SetNillableDeletedAt(v *time.Time) *ChannelMonitorHistoryCreate { + if v != nil { + _c.SetDeletedAt(*v) + } + return _c +} + // SetMonitorID sets the "monitor_id" field. func (_c *ChannelMonitorHistoryCreate) SetMonitorID(v int64) *ChannelMonitorHistoryCreate { _c.mutation.SetMonitorID(v) @@ -109,7 +123,9 @@ func (_c *ChannelMonitorHistoryCreate) Mutation() *ChannelMonitorHistoryMutation // Save creates the ChannelMonitorHistory in the database. func (_c *ChannelMonitorHistoryCreate) Save(ctx context.Context) (*ChannelMonitorHistory, error) { - _c.defaults() + if err := _c.defaults(); err != nil { + return nil, err + } return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) } @@ -136,15 +152,19 @@ func (_c *ChannelMonitorHistoryCreate) ExecX(ctx context.Context) { } // defaults sets the default values of the builder before save. 
-func (_c *ChannelMonitorHistoryCreate) defaults() { +func (_c *ChannelMonitorHistoryCreate) defaults() error { if _, ok := _c.mutation.Message(); !ok { v := channelmonitorhistory.DefaultMessage _c.mutation.SetMessage(v) } if _, ok := _c.mutation.CheckedAt(); !ok { + if channelmonitorhistory.DefaultCheckedAt == nil { + return fmt.Errorf("ent: uninitialized channelmonitorhistory.DefaultCheckedAt (forgotten import ent/runtime?)") + } v := channelmonitorhistory.DefaultCheckedAt() _c.mutation.SetCheckedAt(v) } + return nil } // check runs all checks and user-defined validators on the builder. @@ -206,6 +226,10 @@ func (_c *ChannelMonitorHistoryCreate) createSpec() (*ChannelMonitorHistory, *sq _spec = sqlgraph.NewCreateSpec(channelmonitorhistory.Table, sqlgraph.NewFieldSpec(channelmonitorhistory.FieldID, field.TypeInt64)) ) _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.DeletedAt(); ok { + _spec.SetField(channelmonitorhistory.FieldDeletedAt, field.TypeTime, value) + _node.DeletedAt = &value + } if value, ok := _c.mutation.Model(); ok { _spec.SetField(channelmonitorhistory.FieldModel, field.TypeString, value) _node.Model = value @@ -254,7 +278,7 @@ func (_c *ChannelMonitorHistoryCreate) createSpec() (*ChannelMonitorHistory, *sq // of the `INSERT` statement. For example: // // client.ChannelMonitorHistory.Create(). -// SetMonitorID(v). +// SetDeletedAt(v). // OnConflict( // // Update the row with the new values // // the was proposed for insertion. @@ -263,7 +287,7 @@ func (_c *ChannelMonitorHistoryCreate) createSpec() (*ChannelMonitorHistory, *sq // // Override some of the fields with custom // // update values. // Update(func(u *ent.ChannelMonitorHistoryUpsert) { -// SetMonitorID(v+v). +// SetDeletedAt(v+v). // }). // Exec(ctx) func (_c *ChannelMonitorHistoryCreate) OnConflict(opts ...sql.ConflictOption) *ChannelMonitorHistoryUpsertOne { @@ -299,6 +323,24 @@ type ( } ) +// SetDeletedAt sets the "deleted_at" field. 
+func (u *ChannelMonitorHistoryUpsert) SetDeletedAt(v time.Time) *ChannelMonitorHistoryUpsert { + u.Set(channelmonitorhistory.FieldDeletedAt, v) + return u +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *ChannelMonitorHistoryUpsert) UpdateDeletedAt() *ChannelMonitorHistoryUpsert { + u.SetExcluded(channelmonitorhistory.FieldDeletedAt) + return u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *ChannelMonitorHistoryUpsert) ClearDeletedAt() *ChannelMonitorHistoryUpsert { + u.SetNull(channelmonitorhistory.FieldDeletedAt) + return u +} + // SetMonitorID sets the "monitor_id" field. func (u *ChannelMonitorHistoryUpsert) SetMonitorID(v int64) *ChannelMonitorHistoryUpsert { u.Set(channelmonitorhistory.FieldMonitorID, v) @@ -453,6 +495,27 @@ func (u *ChannelMonitorHistoryUpsertOne) Update(set func(*ChannelMonitorHistoryU return u } +// SetDeletedAt sets the "deleted_at" field. +func (u *ChannelMonitorHistoryUpsertOne) SetDeletedAt(v time.Time) *ChannelMonitorHistoryUpsertOne { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.SetDeletedAt(v) + }) +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *ChannelMonitorHistoryUpsertOne) UpdateDeletedAt() *ChannelMonitorHistoryUpsertOne { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.UpdateDeletedAt() + }) +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *ChannelMonitorHistoryUpsertOne) ClearDeletedAt() *ChannelMonitorHistoryUpsertOne { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.ClearDeletedAt() + }) +} + // SetMonitorID sets the "monitor_id" field. 
func (u *ChannelMonitorHistoryUpsertOne) SetMonitorID(v int64) *ChannelMonitorHistoryUpsertOne { return u.Update(func(s *ChannelMonitorHistoryUpsert) { @@ -721,7 +784,7 @@ func (_c *ChannelMonitorHistoryCreateBulk) ExecX(ctx context.Context) { // // Override some of the fields with custom // // update values. // Update(func(u *ent.ChannelMonitorHistoryUpsert) { -// SetMonitorID(v+v). +// SetDeletedAt(v+v). // }). // Exec(ctx) func (_c *ChannelMonitorHistoryCreateBulk) OnConflict(opts ...sql.ConflictOption) *ChannelMonitorHistoryUpsertBulk { @@ -790,6 +853,27 @@ func (u *ChannelMonitorHistoryUpsertBulk) Update(set func(*ChannelMonitorHistory return u } +// SetDeletedAt sets the "deleted_at" field. +func (u *ChannelMonitorHistoryUpsertBulk) SetDeletedAt(v time.Time) *ChannelMonitorHistoryUpsertBulk { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.SetDeletedAt(v) + }) +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *ChannelMonitorHistoryUpsertBulk) UpdateDeletedAt() *ChannelMonitorHistoryUpsertBulk { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.UpdateDeletedAt() + }) +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *ChannelMonitorHistoryUpsertBulk) ClearDeletedAt() *ChannelMonitorHistoryUpsertBulk { + return u.Update(func(s *ChannelMonitorHistoryUpsert) { + s.ClearDeletedAt() + }) +} + // SetMonitorID sets the "monitor_id" field. 
func (u *ChannelMonitorHistoryUpsertBulk) SetMonitorID(v int64) *ChannelMonitorHistoryUpsertBulk { return u.Update(func(s *ChannelMonitorHistoryUpsert) { diff --git a/backend/ent/channelmonitorhistory_query.go b/backend/ent/channelmonitorhistory_query.go index 1fb872ad..26a1528f 100644 --- a/backend/ent/channelmonitorhistory_query.go +++ b/backend/ent/channelmonitorhistory_query.go @@ -300,12 +300,12 @@ func (_q *ChannelMonitorHistoryQuery) WithMonitor(opts ...func(*ChannelMonitorQu // Example: // // var v []struct { -// MonitorID int64 `json:"monitor_id,omitempty"` +// DeletedAt time.Time `json:"deleted_at,omitempty"` // Count int `json:"count,omitempty"` // } // // client.ChannelMonitorHistory.Query(). -// GroupBy(channelmonitorhistory.FieldMonitorID). +// GroupBy(channelmonitorhistory.FieldDeletedAt). // Aggregate(ent.Count()). // Scan(ctx, &v) func (_q *ChannelMonitorHistoryQuery) GroupBy(field string, fields ...string) *ChannelMonitorHistoryGroupBy { @@ -323,11 +323,11 @@ func (_q *ChannelMonitorHistoryQuery) GroupBy(field string, fields ...string) *C // Example: // // var v []struct { -// MonitorID int64 `json:"monitor_id,omitempty"` +// DeletedAt time.Time `json:"deleted_at,omitempty"` // } // // client.ChannelMonitorHistory.Query(). -// Select(channelmonitorhistory.FieldMonitorID). +// Select(channelmonitorhistory.FieldDeletedAt). // Scan(ctx, &v) func (_q *ChannelMonitorHistoryQuery) Select(fields ...string) *ChannelMonitorHistorySelect { _q.ctx.Fields = append(_q.ctx.Fields, fields...) diff --git a/backend/ent/channelmonitorhistory_update.go b/backend/ent/channelmonitorhistory_update.go index a85a8072..85193ec1 100644 --- a/backend/ent/channelmonitorhistory_update.go +++ b/backend/ent/channelmonitorhistory_update.go @@ -29,6 +29,26 @@ func (_u *ChannelMonitorHistoryUpdate) Where(ps ...predicate.ChannelMonitorHisto return _u } +// SetDeletedAt sets the "deleted_at" field. 
+func (_u *ChannelMonitorHistoryUpdate) SetDeletedAt(v time.Time) *ChannelMonitorHistoryUpdate { + _u.mutation.SetDeletedAt(v) + return _u +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_u *ChannelMonitorHistoryUpdate) SetNillableDeletedAt(v *time.Time) *ChannelMonitorHistoryUpdate { + if v != nil { + _u.SetDeletedAt(*v) + } + return _u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (_u *ChannelMonitorHistoryUpdate) ClearDeletedAt() *ChannelMonitorHistoryUpdate { + _u.mutation.ClearDeletedAt() + return _u +} + // SetMonitorID sets the "monitor_id" field. func (_u *ChannelMonitorHistoryUpdate) SetMonitorID(v int64) *ChannelMonitorHistoryUpdate { _u.mutation.SetMonitorID(v) @@ -237,6 +257,12 @@ func (_u *ChannelMonitorHistoryUpdate) sqlSave(ctx context.Context) (_node int, } } } + if value, ok := _u.mutation.DeletedAt(); ok { + _spec.SetField(channelmonitorhistory.FieldDeletedAt, field.TypeTime, value) + } + if _u.mutation.DeletedAtCleared() { + _spec.ClearField(channelmonitorhistory.FieldDeletedAt, field.TypeTime) + } if value, ok := _u.mutation.Model(); ok { _spec.SetField(channelmonitorhistory.FieldModel, field.TypeString, value) } @@ -319,6 +345,26 @@ type ChannelMonitorHistoryUpdateOne struct { mutation *ChannelMonitorHistoryMutation } +// SetDeletedAt sets the "deleted_at" field. +func (_u *ChannelMonitorHistoryUpdateOne) SetDeletedAt(v time.Time) *ChannelMonitorHistoryUpdateOne { + _u.mutation.SetDeletedAt(v) + return _u +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_u *ChannelMonitorHistoryUpdateOne) SetNillableDeletedAt(v *time.Time) *ChannelMonitorHistoryUpdateOne { + if v != nil { + _u.SetDeletedAt(*v) + } + return _u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. 
+func (_u *ChannelMonitorHistoryUpdateOne) ClearDeletedAt() *ChannelMonitorHistoryUpdateOne { + _u.mutation.ClearDeletedAt() + return _u +} + // SetMonitorID sets the "monitor_id" field. func (_u *ChannelMonitorHistoryUpdateOne) SetMonitorID(v int64) *ChannelMonitorHistoryUpdateOne { _u.mutation.SetMonitorID(v) @@ -557,6 +603,12 @@ func (_u *ChannelMonitorHistoryUpdateOne) sqlSave(ctx context.Context) (_node *C } } } + if value, ok := _u.mutation.DeletedAt(); ok { + _spec.SetField(channelmonitorhistory.FieldDeletedAt, field.TypeTime, value) + } + if _u.mutation.DeletedAtCleared() { + _spec.ClearField(channelmonitorhistory.FieldDeletedAt, field.TypeTime) + } if value, ok := _u.mutation.Model(); ok { _spec.SetField(channelmonitorhistory.FieldModel, field.TypeString, value) } diff --git a/backend/ent/client.go b/backend/ent/client.go index 72ef2a36..ca208094 100644 --- a/backend/ent/client.go +++ b/backend/ent/client.go @@ -23,6 +23,7 @@ import ( "github.com/Wei-Shaw/sub2api/ent/authidentity" "github.com/Wei-Shaw/sub2api/ent/authidentitychannel" "github.com/Wei-Shaw/sub2api/ent/channelmonitor" + "github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup" "github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory" "github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule" "github.com/Wei-Shaw/sub2api/ent/group" @@ -72,6 +73,8 @@ type Client struct { AuthIdentityChannel *AuthIdentityChannelClient // ChannelMonitor is the client for interacting with the ChannelMonitor builders. ChannelMonitor *ChannelMonitorClient + // ChannelMonitorDailyRollup is the client for interacting with the ChannelMonitorDailyRollup builders. + ChannelMonitorDailyRollup *ChannelMonitorDailyRollupClient // ChannelMonitorHistory is the client for interacting with the ChannelMonitorHistory builders. ChannelMonitorHistory *ChannelMonitorHistoryClient // ErrorPassthroughRule is the client for interacting with the ErrorPassthroughRule builders. 
@@ -139,6 +142,7 @@ func (c *Client) init() { c.AuthIdentity = NewAuthIdentityClient(c.config) c.AuthIdentityChannel = NewAuthIdentityChannelClient(c.config) c.ChannelMonitor = NewChannelMonitorClient(c.config) + c.ChannelMonitorDailyRollup = NewChannelMonitorDailyRollupClient(c.config) c.ChannelMonitorHistory = NewChannelMonitorHistoryClient(c.config) c.ErrorPassthroughRule = NewErrorPassthroughRuleClient(c.config) c.Group = NewGroupClient(c.config) @@ -253,40 +257,41 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) { cfg := c.config cfg.driver = tx return &Tx{ - ctx: ctx, - config: cfg, - APIKey: NewAPIKeyClient(cfg), - Account: NewAccountClient(cfg), - AccountGroup: NewAccountGroupClient(cfg), - Announcement: NewAnnouncementClient(cfg), - AnnouncementRead: NewAnnouncementReadClient(cfg), - AuthIdentity: NewAuthIdentityClient(cfg), - AuthIdentityChannel: NewAuthIdentityChannelClient(cfg), - ChannelMonitor: NewChannelMonitorClient(cfg), - ChannelMonitorHistory: NewChannelMonitorHistoryClient(cfg), - ErrorPassthroughRule: NewErrorPassthroughRuleClient(cfg), - Group: NewGroupClient(cfg), - IdempotencyRecord: NewIdempotencyRecordClient(cfg), - IdentityAdoptionDecision: NewIdentityAdoptionDecisionClient(cfg), - PaymentAuditLog: NewPaymentAuditLogClient(cfg), - PaymentOrder: NewPaymentOrderClient(cfg), - PaymentProviderInstance: NewPaymentProviderInstanceClient(cfg), - PendingAuthSession: NewPendingAuthSessionClient(cfg), - PromoCode: NewPromoCodeClient(cfg), - PromoCodeUsage: NewPromoCodeUsageClient(cfg), - Proxy: NewProxyClient(cfg), - RedeemCode: NewRedeemCodeClient(cfg), - SecuritySecret: NewSecuritySecretClient(cfg), - Setting: NewSettingClient(cfg), - SubscriptionPlan: NewSubscriptionPlanClient(cfg), - TLSFingerprintProfile: NewTLSFingerprintProfileClient(cfg), - UsageCleanupTask: NewUsageCleanupTaskClient(cfg), - UsageLog: NewUsageLogClient(cfg), - User: NewUserClient(cfg), - UserAllowedGroup: NewUserAllowedGroupClient(cfg), - UserAttributeDefinition: 
NewUserAttributeDefinitionClient(cfg), - UserAttributeValue: NewUserAttributeValueClient(cfg), - UserSubscription: NewUserSubscriptionClient(cfg), + ctx: ctx, + config: cfg, + APIKey: NewAPIKeyClient(cfg), + Account: NewAccountClient(cfg), + AccountGroup: NewAccountGroupClient(cfg), + Announcement: NewAnnouncementClient(cfg), + AnnouncementRead: NewAnnouncementReadClient(cfg), + AuthIdentity: NewAuthIdentityClient(cfg), + AuthIdentityChannel: NewAuthIdentityChannelClient(cfg), + ChannelMonitor: NewChannelMonitorClient(cfg), + ChannelMonitorDailyRollup: NewChannelMonitorDailyRollupClient(cfg), + ChannelMonitorHistory: NewChannelMonitorHistoryClient(cfg), + ErrorPassthroughRule: NewErrorPassthroughRuleClient(cfg), + Group: NewGroupClient(cfg), + IdempotencyRecord: NewIdempotencyRecordClient(cfg), + IdentityAdoptionDecision: NewIdentityAdoptionDecisionClient(cfg), + PaymentAuditLog: NewPaymentAuditLogClient(cfg), + PaymentOrder: NewPaymentOrderClient(cfg), + PaymentProviderInstance: NewPaymentProviderInstanceClient(cfg), + PendingAuthSession: NewPendingAuthSessionClient(cfg), + PromoCode: NewPromoCodeClient(cfg), + PromoCodeUsage: NewPromoCodeUsageClient(cfg), + Proxy: NewProxyClient(cfg), + RedeemCode: NewRedeemCodeClient(cfg), + SecuritySecret: NewSecuritySecretClient(cfg), + Setting: NewSettingClient(cfg), + SubscriptionPlan: NewSubscriptionPlanClient(cfg), + TLSFingerprintProfile: NewTLSFingerprintProfileClient(cfg), + UsageCleanupTask: NewUsageCleanupTaskClient(cfg), + UsageLog: NewUsageLogClient(cfg), + User: NewUserClient(cfg), + UserAllowedGroup: NewUserAllowedGroupClient(cfg), + UserAttributeDefinition: NewUserAttributeDefinitionClient(cfg), + UserAttributeValue: NewUserAttributeValueClient(cfg), + UserSubscription: NewUserSubscriptionClient(cfg), }, nil } @@ -304,40 +309,41 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) cfg := c.config cfg.driver = &txDriver{tx: tx, drv: c.driver} return &Tx{ - ctx: ctx, - config: cfg, - 
APIKey: NewAPIKeyClient(cfg), - Account: NewAccountClient(cfg), - AccountGroup: NewAccountGroupClient(cfg), - Announcement: NewAnnouncementClient(cfg), - AnnouncementRead: NewAnnouncementReadClient(cfg), - AuthIdentity: NewAuthIdentityClient(cfg), - AuthIdentityChannel: NewAuthIdentityChannelClient(cfg), - ChannelMonitor: NewChannelMonitorClient(cfg), - ChannelMonitorHistory: NewChannelMonitorHistoryClient(cfg), - ErrorPassthroughRule: NewErrorPassthroughRuleClient(cfg), - Group: NewGroupClient(cfg), - IdempotencyRecord: NewIdempotencyRecordClient(cfg), - IdentityAdoptionDecision: NewIdentityAdoptionDecisionClient(cfg), - PaymentAuditLog: NewPaymentAuditLogClient(cfg), - PaymentOrder: NewPaymentOrderClient(cfg), - PaymentProviderInstance: NewPaymentProviderInstanceClient(cfg), - PendingAuthSession: NewPendingAuthSessionClient(cfg), - PromoCode: NewPromoCodeClient(cfg), - PromoCodeUsage: NewPromoCodeUsageClient(cfg), - Proxy: NewProxyClient(cfg), - RedeemCode: NewRedeemCodeClient(cfg), - SecuritySecret: NewSecuritySecretClient(cfg), - Setting: NewSettingClient(cfg), - SubscriptionPlan: NewSubscriptionPlanClient(cfg), - TLSFingerprintProfile: NewTLSFingerprintProfileClient(cfg), - UsageCleanupTask: NewUsageCleanupTaskClient(cfg), - UsageLog: NewUsageLogClient(cfg), - User: NewUserClient(cfg), - UserAllowedGroup: NewUserAllowedGroupClient(cfg), - UserAttributeDefinition: NewUserAttributeDefinitionClient(cfg), - UserAttributeValue: NewUserAttributeValueClient(cfg), - UserSubscription: NewUserSubscriptionClient(cfg), + ctx: ctx, + config: cfg, + APIKey: NewAPIKeyClient(cfg), + Account: NewAccountClient(cfg), + AccountGroup: NewAccountGroupClient(cfg), + Announcement: NewAnnouncementClient(cfg), + AnnouncementRead: NewAnnouncementReadClient(cfg), + AuthIdentity: NewAuthIdentityClient(cfg), + AuthIdentityChannel: NewAuthIdentityChannelClient(cfg), + ChannelMonitor: NewChannelMonitorClient(cfg), + ChannelMonitorDailyRollup: NewChannelMonitorDailyRollupClient(cfg), + 
ChannelMonitorHistory: NewChannelMonitorHistoryClient(cfg), + ErrorPassthroughRule: NewErrorPassthroughRuleClient(cfg), + Group: NewGroupClient(cfg), + IdempotencyRecord: NewIdempotencyRecordClient(cfg), + IdentityAdoptionDecision: NewIdentityAdoptionDecisionClient(cfg), + PaymentAuditLog: NewPaymentAuditLogClient(cfg), + PaymentOrder: NewPaymentOrderClient(cfg), + PaymentProviderInstance: NewPaymentProviderInstanceClient(cfg), + PendingAuthSession: NewPendingAuthSessionClient(cfg), + PromoCode: NewPromoCodeClient(cfg), + PromoCodeUsage: NewPromoCodeUsageClient(cfg), + Proxy: NewProxyClient(cfg), + RedeemCode: NewRedeemCodeClient(cfg), + SecuritySecret: NewSecuritySecretClient(cfg), + Setting: NewSettingClient(cfg), + SubscriptionPlan: NewSubscriptionPlanClient(cfg), + TLSFingerprintProfile: NewTLSFingerprintProfileClient(cfg), + UsageCleanupTask: NewUsageCleanupTaskClient(cfg), + UsageLog: NewUsageLogClient(cfg), + User: NewUserClient(cfg), + UserAllowedGroup: NewUserAllowedGroupClient(cfg), + UserAttributeDefinition: NewUserAttributeDefinitionClient(cfg), + UserAttributeValue: NewUserAttributeValueClient(cfg), + UserSubscription: NewUserSubscriptionClient(cfg), }, nil } @@ -369,12 +375,12 @@ func (c *Client) Use(hooks ...Hook) { for _, n := range []interface{ Use(...Hook) }{ c.APIKey, c.Account, c.AccountGroup, c.Announcement, c.AnnouncementRead, c.AuthIdentity, c.AuthIdentityChannel, c.ChannelMonitor, - c.ChannelMonitorHistory, c.ErrorPassthroughRule, c.Group, c.IdempotencyRecord, - c.IdentityAdoptionDecision, c.PaymentAuditLog, c.PaymentOrder, - c.PaymentProviderInstance, c.PendingAuthSession, c.PromoCode, c.PromoCodeUsage, - c.Proxy, c.RedeemCode, c.SecuritySecret, c.Setting, c.SubscriptionPlan, - c.TLSFingerprintProfile, c.UsageCleanupTask, c.UsageLog, c.User, - c.UserAllowedGroup, c.UserAttributeDefinition, c.UserAttributeValue, + c.ChannelMonitorDailyRollup, c.ChannelMonitorHistory, c.ErrorPassthroughRule, + c.Group, c.IdempotencyRecord, 
c.IdentityAdoptionDecision, c.PaymentAuditLog, + c.PaymentOrder, c.PaymentProviderInstance, c.PendingAuthSession, c.PromoCode, + c.PromoCodeUsage, c.Proxy, c.RedeemCode, c.SecuritySecret, c.Setting, + c.SubscriptionPlan, c.TLSFingerprintProfile, c.UsageCleanupTask, c.UsageLog, + c.User, c.UserAllowedGroup, c.UserAttributeDefinition, c.UserAttributeValue, c.UserSubscription, } { n.Use(hooks...) @@ -387,12 +393,12 @@ func (c *Client) Intercept(interceptors ...Interceptor) { for _, n := range []interface{ Intercept(...Interceptor) }{ c.APIKey, c.Account, c.AccountGroup, c.Announcement, c.AnnouncementRead, c.AuthIdentity, c.AuthIdentityChannel, c.ChannelMonitor, - c.ChannelMonitorHistory, c.ErrorPassthroughRule, c.Group, c.IdempotencyRecord, - c.IdentityAdoptionDecision, c.PaymentAuditLog, c.PaymentOrder, - c.PaymentProviderInstance, c.PendingAuthSession, c.PromoCode, c.PromoCodeUsage, - c.Proxy, c.RedeemCode, c.SecuritySecret, c.Setting, c.SubscriptionPlan, - c.TLSFingerprintProfile, c.UsageCleanupTask, c.UsageLog, c.User, - c.UserAllowedGroup, c.UserAttributeDefinition, c.UserAttributeValue, + c.ChannelMonitorDailyRollup, c.ChannelMonitorHistory, c.ErrorPassthroughRule, + c.Group, c.IdempotencyRecord, c.IdentityAdoptionDecision, c.PaymentAuditLog, + c.PaymentOrder, c.PaymentProviderInstance, c.PendingAuthSession, c.PromoCode, + c.PromoCodeUsage, c.Proxy, c.RedeemCode, c.SecuritySecret, c.Setting, + c.SubscriptionPlan, c.TLSFingerprintProfile, c.UsageCleanupTask, c.UsageLog, + c.User, c.UserAllowedGroup, c.UserAttributeDefinition, c.UserAttributeValue, c.UserSubscription, } { n.Intercept(interceptors...) 
@@ -418,6 +424,8 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { return c.AuthIdentityChannel.mutate(ctx, m) case *ChannelMonitorMutation: return c.ChannelMonitor.mutate(ctx, m) + case *ChannelMonitorDailyRollupMutation: + return c.ChannelMonitorDailyRollup.mutate(ctx, m) case *ChannelMonitorHistoryMutation: return c.ChannelMonitorHistory.mutate(ctx, m) case *ErrorPassthroughRuleMutation: @@ -1737,6 +1745,22 @@ func (c *ChannelMonitorClient) QueryHistory(_m *ChannelMonitor) *ChannelMonitorH return query } +// QueryDailyRollups queries the daily_rollups edge of a ChannelMonitor. +func (c *ChannelMonitorClient) QueryDailyRollups(_m *ChannelMonitor) *ChannelMonitorDailyRollupQuery { + query := (&ChannelMonitorDailyRollupClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(channelmonitor.Table, channelmonitor.FieldID, id), + sqlgraph.To(channelmonitordailyrollup.Table, channelmonitordailyrollup.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, channelmonitor.DailyRollupsTable, channelmonitor.DailyRollupsColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + // Hooks returns the client hooks. func (c *ChannelMonitorClient) Hooks() []Hook { return c.hooks.ChannelMonitor @@ -1762,6 +1786,157 @@ func (c *ChannelMonitorClient) mutate(ctx context.Context, m *ChannelMonitorMuta } } +// ChannelMonitorDailyRollupClient is a client for the ChannelMonitorDailyRollup schema. +type ChannelMonitorDailyRollupClient struct { + config +} + +// NewChannelMonitorDailyRollupClient returns a client for the ChannelMonitorDailyRollup from the given config. +func NewChannelMonitorDailyRollupClient(c config) *ChannelMonitorDailyRollupClient { + return &ChannelMonitorDailyRollupClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. 
+// A call to `Use(f, g, h)` equals to `channelmonitordailyrollup.Hooks(f(g(h())))`. +func (c *ChannelMonitorDailyRollupClient) Use(hooks ...Hook) { + c.hooks.ChannelMonitorDailyRollup = append(c.hooks.ChannelMonitorDailyRollup, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `channelmonitordailyrollup.Intercept(f(g(h())))`. +func (c *ChannelMonitorDailyRollupClient) Intercept(interceptors ...Interceptor) { + c.inters.ChannelMonitorDailyRollup = append(c.inters.ChannelMonitorDailyRollup, interceptors...) +} + +// Create returns a builder for creating a ChannelMonitorDailyRollup entity. +func (c *ChannelMonitorDailyRollupClient) Create() *ChannelMonitorDailyRollupCreate { + mutation := newChannelMonitorDailyRollupMutation(c.config, OpCreate) + return &ChannelMonitorDailyRollupCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of ChannelMonitorDailyRollup entities. +func (c *ChannelMonitorDailyRollupClient) CreateBulk(builders ...*ChannelMonitorDailyRollupCreate) *ChannelMonitorDailyRollupCreateBulk { + return &ChannelMonitorDailyRollupCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *ChannelMonitorDailyRollupClient) MapCreateBulk(slice any, setFunc func(*ChannelMonitorDailyRollupCreate, int)) *ChannelMonitorDailyRollupCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &ChannelMonitorDailyRollupCreateBulk{err: fmt.Errorf("calling to ChannelMonitorDailyRollupClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*ChannelMonitorDailyRollupCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &ChannelMonitorDailyRollupCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for ChannelMonitorDailyRollup. +func (c *ChannelMonitorDailyRollupClient) Update() *ChannelMonitorDailyRollupUpdate { + mutation := newChannelMonitorDailyRollupMutation(c.config, OpUpdate) + return &ChannelMonitorDailyRollupUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *ChannelMonitorDailyRollupClient) UpdateOne(_m *ChannelMonitorDailyRollup) *ChannelMonitorDailyRollupUpdateOne { + mutation := newChannelMonitorDailyRollupMutation(c.config, OpUpdateOne, withChannelMonitorDailyRollup(_m)) + return &ChannelMonitorDailyRollupUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *ChannelMonitorDailyRollupClient) UpdateOneID(id int64) *ChannelMonitorDailyRollupUpdateOne { + mutation := newChannelMonitorDailyRollupMutation(c.config, OpUpdateOne, withChannelMonitorDailyRollupID(id)) + return &ChannelMonitorDailyRollupUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for ChannelMonitorDailyRollup. 
+func (c *ChannelMonitorDailyRollupClient) Delete() *ChannelMonitorDailyRollupDelete { + mutation := newChannelMonitorDailyRollupMutation(c.config, OpDelete) + return &ChannelMonitorDailyRollupDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *ChannelMonitorDailyRollupClient) DeleteOne(_m *ChannelMonitorDailyRollup) *ChannelMonitorDailyRollupDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *ChannelMonitorDailyRollupClient) DeleteOneID(id int64) *ChannelMonitorDailyRollupDeleteOne { + builder := c.Delete().Where(channelmonitordailyrollup.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &ChannelMonitorDailyRollupDeleteOne{builder} +} + +// Query returns a query builder for ChannelMonitorDailyRollup. +func (c *ChannelMonitorDailyRollupClient) Query() *ChannelMonitorDailyRollupQuery { + return &ChannelMonitorDailyRollupQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeChannelMonitorDailyRollup}, + inters: c.Interceptors(), + } +} + +// Get returns a ChannelMonitorDailyRollup entity by its id. +func (c *ChannelMonitorDailyRollupClient) Get(ctx context.Context, id int64) (*ChannelMonitorDailyRollup, error) { + return c.Query().Where(channelmonitordailyrollup.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *ChannelMonitorDailyRollupClient) GetX(ctx context.Context, id int64) *ChannelMonitorDailyRollup { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryMonitor queries the monitor edge of a ChannelMonitorDailyRollup. 
+func (c *ChannelMonitorDailyRollupClient) QueryMonitor(_m *ChannelMonitorDailyRollup) *ChannelMonitorQuery { + query := (&ChannelMonitorClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(channelmonitordailyrollup.Table, channelmonitordailyrollup.FieldID, id), + sqlgraph.To(channelmonitor.Table, channelmonitor.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, channelmonitordailyrollup.MonitorTable, channelmonitordailyrollup.MonitorColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *ChannelMonitorDailyRollupClient) Hooks() []Hook { + hooks := c.hooks.ChannelMonitorDailyRollup + return append(hooks[:len(hooks):len(hooks)], channelmonitordailyrollup.Hooks[:]...) +} + +// Interceptors returns the client interceptors. +func (c *ChannelMonitorDailyRollupClient) Interceptors() []Interceptor { + inters := c.inters.ChannelMonitorDailyRollup + return append(inters[:len(inters):len(inters)], channelmonitordailyrollup.Interceptors[:]...) +} + +func (c *ChannelMonitorDailyRollupClient) mutate(ctx context.Context, m *ChannelMonitorDailyRollupMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&ChannelMonitorDailyRollupCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&ChannelMonitorDailyRollupUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&ChannelMonitorDailyRollupUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&ChannelMonitorDailyRollupDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown ChannelMonitorDailyRollup mutation op: %q", m.Op()) + } +} + // ChannelMonitorHistoryClient is a client for the ChannelMonitorHistory schema. 
type ChannelMonitorHistoryClient struct { config @@ -1888,12 +2063,14 @@ func (c *ChannelMonitorHistoryClient) QueryMonitor(_m *ChannelMonitorHistory) *C // Hooks returns the client hooks. func (c *ChannelMonitorHistoryClient) Hooks() []Hook { - return c.hooks.ChannelMonitorHistory + hooks := c.hooks.ChannelMonitorHistory + return append(hooks[:len(hooks):len(hooks)], channelmonitorhistory.Hooks[:]...) } // Interceptors returns the client interceptors. func (c *ChannelMonitorHistoryClient) Interceptors() []Interceptor { - return c.inters.ChannelMonitorHistory + inters := c.inters.ChannelMonitorHistory + return append(inters[:len(inters):len(inters)], channelmonitorhistory.Interceptors[:]...) } func (c *ChannelMonitorHistoryClient) mutate(ctx context.Context, m *ChannelMonitorHistoryMutation) (Value, error) { @@ -5671,23 +5848,23 @@ func (c *UserSubscriptionClient) mutate(ctx context.Context, m *UserSubscription type ( hooks struct { APIKey, Account, AccountGroup, Announcement, AnnouncementRead, AuthIdentity, - AuthIdentityChannel, ChannelMonitor, ChannelMonitorHistory, - ErrorPassthroughRule, Group, IdempotencyRecord, IdentityAdoptionDecision, - PaymentAuditLog, PaymentOrder, PaymentProviderInstance, PendingAuthSession, - PromoCode, PromoCodeUsage, Proxy, RedeemCode, SecuritySecret, Setting, - SubscriptionPlan, TLSFingerprintProfile, UsageCleanupTask, UsageLog, User, - UserAllowedGroup, UserAttributeDefinition, UserAttributeValue, - UserSubscription []ent.Hook + AuthIdentityChannel, ChannelMonitor, ChannelMonitorDailyRollup, + ChannelMonitorHistory, ErrorPassthroughRule, Group, IdempotencyRecord, + IdentityAdoptionDecision, PaymentAuditLog, PaymentOrder, + PaymentProviderInstance, PendingAuthSession, PromoCode, PromoCodeUsage, Proxy, + RedeemCode, SecuritySecret, Setting, SubscriptionPlan, TLSFingerprintProfile, + UsageCleanupTask, UsageLog, User, UserAllowedGroup, UserAttributeDefinition, + UserAttributeValue, UserSubscription []ent.Hook } inters struct { APIKey, 
Account, AccountGroup, Announcement, AnnouncementRead, AuthIdentity, - AuthIdentityChannel, ChannelMonitor, ChannelMonitorHistory, - ErrorPassthroughRule, Group, IdempotencyRecord, IdentityAdoptionDecision, - PaymentAuditLog, PaymentOrder, PaymentProviderInstance, PendingAuthSession, - PromoCode, PromoCodeUsage, Proxy, RedeemCode, SecuritySecret, Setting, - SubscriptionPlan, TLSFingerprintProfile, UsageCleanupTask, UsageLog, User, - UserAllowedGroup, UserAttributeDefinition, UserAttributeValue, - UserSubscription []ent.Interceptor + AuthIdentityChannel, ChannelMonitor, ChannelMonitorDailyRollup, + ChannelMonitorHistory, ErrorPassthroughRule, Group, IdempotencyRecord, + IdentityAdoptionDecision, PaymentAuditLog, PaymentOrder, + PaymentProviderInstance, PendingAuthSession, PromoCode, PromoCodeUsage, Proxy, + RedeemCode, SecuritySecret, Setting, SubscriptionPlan, TLSFingerprintProfile, + UsageCleanupTask, UsageLog, User, UserAllowedGroup, UserAttributeDefinition, + UserAttributeValue, UserSubscription []ent.Interceptor } ) diff --git a/backend/ent/ent.go b/backend/ent/ent.go index e03ea74e..71d17624 100644 --- a/backend/ent/ent.go +++ b/backend/ent/ent.go @@ -20,6 +20,7 @@ import ( "github.com/Wei-Shaw/sub2api/ent/authidentity" "github.com/Wei-Shaw/sub2api/ent/authidentitychannel" "github.com/Wei-Shaw/sub2api/ent/channelmonitor" + "github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup" "github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory" "github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule" "github.com/Wei-Shaw/sub2api/ent/group" @@ -104,38 +105,39 @@ var ( func checkColumn(t, c string) error { initCheck.Do(func() { columnCheck = sql.NewColumnCheck(map[string]func(string) bool{ - apikey.Table: apikey.ValidColumn, - account.Table: account.ValidColumn, - accountgroup.Table: accountgroup.ValidColumn, - announcement.Table: announcement.ValidColumn, - announcementread.Table: announcementread.ValidColumn, - authidentity.Table: authidentity.ValidColumn, - 
authidentitychannel.Table: authidentitychannel.ValidColumn, - channelmonitor.Table: channelmonitor.ValidColumn, - channelmonitorhistory.Table: channelmonitorhistory.ValidColumn, - errorpassthroughrule.Table: errorpassthroughrule.ValidColumn, - group.Table: group.ValidColumn, - idempotencyrecord.Table: idempotencyrecord.ValidColumn, - identityadoptiondecision.Table: identityadoptiondecision.ValidColumn, - paymentauditlog.Table: paymentauditlog.ValidColumn, - paymentorder.Table: paymentorder.ValidColumn, - paymentproviderinstance.Table: paymentproviderinstance.ValidColumn, - pendingauthsession.Table: pendingauthsession.ValidColumn, - promocode.Table: promocode.ValidColumn, - promocodeusage.Table: promocodeusage.ValidColumn, - proxy.Table: proxy.ValidColumn, - redeemcode.Table: redeemcode.ValidColumn, - securitysecret.Table: securitysecret.ValidColumn, - setting.Table: setting.ValidColumn, - subscriptionplan.Table: subscriptionplan.ValidColumn, - tlsfingerprintprofile.Table: tlsfingerprintprofile.ValidColumn, - usagecleanuptask.Table: usagecleanuptask.ValidColumn, - usagelog.Table: usagelog.ValidColumn, - user.Table: user.ValidColumn, - userallowedgroup.Table: userallowedgroup.ValidColumn, - userattributedefinition.Table: userattributedefinition.ValidColumn, - userattributevalue.Table: userattributevalue.ValidColumn, - usersubscription.Table: usersubscription.ValidColumn, + apikey.Table: apikey.ValidColumn, + account.Table: account.ValidColumn, + accountgroup.Table: accountgroup.ValidColumn, + announcement.Table: announcement.ValidColumn, + announcementread.Table: announcementread.ValidColumn, + authidentity.Table: authidentity.ValidColumn, + authidentitychannel.Table: authidentitychannel.ValidColumn, + channelmonitor.Table: channelmonitor.ValidColumn, + channelmonitordailyrollup.Table: channelmonitordailyrollup.ValidColumn, + channelmonitorhistory.Table: channelmonitorhistory.ValidColumn, + errorpassthroughrule.Table: errorpassthroughrule.ValidColumn, + group.Table: 
group.ValidColumn, + idempotencyrecord.Table: idempotencyrecord.ValidColumn, + identityadoptiondecision.Table: identityadoptiondecision.ValidColumn, + paymentauditlog.Table: paymentauditlog.ValidColumn, + paymentorder.Table: paymentorder.ValidColumn, + paymentproviderinstance.Table: paymentproviderinstance.ValidColumn, + pendingauthsession.Table: pendingauthsession.ValidColumn, + promocode.Table: promocode.ValidColumn, + promocodeusage.Table: promocodeusage.ValidColumn, + proxy.Table: proxy.ValidColumn, + redeemcode.Table: redeemcode.ValidColumn, + securitysecret.Table: securitysecret.ValidColumn, + setting.Table: setting.ValidColumn, + subscriptionplan.Table: subscriptionplan.ValidColumn, + tlsfingerprintprofile.Table: tlsfingerprintprofile.ValidColumn, + usagecleanuptask.Table: usagecleanuptask.ValidColumn, + usagelog.Table: usagelog.ValidColumn, + user.Table: user.ValidColumn, + userallowedgroup.Table: userallowedgroup.ValidColumn, + userattributedefinition.Table: userattributedefinition.ValidColumn, + userattributevalue.Table: userattributevalue.ValidColumn, + usersubscription.Table: usersubscription.ValidColumn, }) }) return columnCheck(t, c) diff --git a/backend/ent/hook/hook.go b/backend/ent/hook/hook.go index e2ffec31..ff86c90d 100644 --- a/backend/ent/hook/hook.go +++ b/backend/ent/hook/hook.go @@ -105,6 +105,18 @@ func (f ChannelMonitorFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Val return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ChannelMonitorMutation", m) } +// The ChannelMonitorDailyRollupFunc type is an adapter to allow the use of ordinary +// function as ChannelMonitorDailyRollup mutator. +type ChannelMonitorDailyRollupFunc func(context.Context, *ent.ChannelMonitorDailyRollupMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). 
+func (f ChannelMonitorDailyRollupFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.ChannelMonitorDailyRollupMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ChannelMonitorDailyRollupMutation", m) +} + // The ChannelMonitorHistoryFunc type is an adapter to allow the use of ordinary // function as ChannelMonitorHistory mutator. type ChannelMonitorHistoryFunc func(context.Context, *ent.ChannelMonitorHistoryMutation) (ent.Value, error) diff --git a/backend/ent/intercept/intercept.go b/backend/ent/intercept/intercept.go index 1f11755b..0c83fc38 100644 --- a/backend/ent/intercept/intercept.go +++ b/backend/ent/intercept/intercept.go @@ -16,6 +16,7 @@ import ( "github.com/Wei-Shaw/sub2api/ent/authidentity" "github.com/Wei-Shaw/sub2api/ent/authidentitychannel" "github.com/Wei-Shaw/sub2api/ent/channelmonitor" + "github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup" "github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory" "github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule" "github.com/Wei-Shaw/sub2api/ent/group" @@ -315,6 +316,33 @@ func (f TraverseChannelMonitor) Traverse(ctx context.Context, q ent.Query) error return fmt.Errorf("unexpected query type %T. expect *ent.ChannelMonitorQuery", q) } +// The ChannelMonitorDailyRollupFunc type is an adapter to allow the use of ordinary function as a Querier. +type ChannelMonitorDailyRollupFunc func(context.Context, *ent.ChannelMonitorDailyRollupQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f ChannelMonitorDailyRollupFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.ChannelMonitorDailyRollupQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.ChannelMonitorDailyRollupQuery", q) +} + +// The TraverseChannelMonitorDailyRollup type is an adapter to allow the use of ordinary function as Traverser. 
+type TraverseChannelMonitorDailyRollup func(context.Context, *ent.ChannelMonitorDailyRollupQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseChannelMonitorDailyRollup) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseChannelMonitorDailyRollup) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.ChannelMonitorDailyRollupQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.ChannelMonitorDailyRollupQuery", q) +} + // The ChannelMonitorHistoryFunc type is an adapter to allow the use of ordinary function as a Querier. type ChannelMonitorHistoryFunc func(context.Context, *ent.ChannelMonitorHistoryQuery) (ent.Value, error) @@ -982,6 +1010,8 @@ func NewQuery(q ent.Query) (Query, error) { return &query[*ent.AuthIdentityChannelQuery, predicate.AuthIdentityChannel, authidentitychannel.OrderOption]{typ: ent.TypeAuthIdentityChannel, tq: q}, nil case *ent.ChannelMonitorQuery: return &query[*ent.ChannelMonitorQuery, predicate.ChannelMonitor, channelmonitor.OrderOption]{typ: ent.TypeChannelMonitor, tq: q}, nil + case *ent.ChannelMonitorDailyRollupQuery: + return &query[*ent.ChannelMonitorDailyRollupQuery, predicate.ChannelMonitorDailyRollup, channelmonitordailyrollup.OrderOption]{typ: ent.TypeChannelMonitorDailyRollup, tq: q}, nil case *ent.ChannelMonitorHistoryQuery: return &query[*ent.ChannelMonitorHistoryQuery, predicate.ChannelMonitorHistory, channelmonitorhistory.OrderOption]{typ: ent.TypeChannelMonitorHistory, tq: q}, nil case *ent.ErrorPassthroughRuleQuery: diff --git a/backend/ent/migrate/schema.go b/backend/ent/migrate/schema.go index 3dc17fa2..9ce914a3 100644 --- a/backend/ent/migrate/schema.go +++ b/backend/ent/migrate/schema.go @@ -461,9 +461,55 @@ var ( }, }, } + // ChannelMonitorDailyRollupsColumns holds the columns for the "channel_monitor_daily_rollups" table. 
+ ChannelMonitorDailyRollupsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "deleted_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "model", Type: field.TypeString, Size: 200}, + {Name: "bucket_date", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "date"}}, + {Name: "total_checks", Type: field.TypeInt, Default: 0}, + {Name: "ok_count", Type: field.TypeInt, Default: 0}, + {Name: "operational_count", Type: field.TypeInt, Default: 0}, + {Name: "degraded_count", Type: field.TypeInt, Default: 0}, + {Name: "failed_count", Type: field.TypeInt, Default: 0}, + {Name: "error_count", Type: field.TypeInt, Default: 0}, + {Name: "sum_latency_ms", Type: field.TypeInt64, Default: 0}, + {Name: "count_latency", Type: field.TypeInt, Default: 0}, + {Name: "sum_ping_latency_ms", Type: field.TypeInt64, Default: 0}, + {Name: "count_ping_latency", Type: field.TypeInt, Default: 0}, + {Name: "computed_at", Type: field.TypeTime}, + {Name: "monitor_id", Type: field.TypeInt64}, + } + // ChannelMonitorDailyRollupsTable holds the schema information for the "channel_monitor_daily_rollups" table. 
+ ChannelMonitorDailyRollupsTable = &schema.Table{ + Name: "channel_monitor_daily_rollups", + Columns: ChannelMonitorDailyRollupsColumns, + PrimaryKey: []*schema.Column{ChannelMonitorDailyRollupsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "channel_monitor_daily_rollups_channel_monitors_daily_rollups", + Columns: []*schema.Column{ChannelMonitorDailyRollupsColumns[15]}, + RefColumns: []*schema.Column{ChannelMonitorsColumns[0]}, + OnDelete: schema.Cascade, + }, + }, + Indexes: []*schema.Index{ + { + Name: "channelmonitordailyrollup_monitor_id_model_bucket_date", + Unique: true, + Columns: []*schema.Column{ChannelMonitorDailyRollupsColumns[15], ChannelMonitorDailyRollupsColumns[2], ChannelMonitorDailyRollupsColumns[3]}, + }, + { + Name: "channelmonitordailyrollup_bucket_date", + Unique: false, + Columns: []*schema.Column{ChannelMonitorDailyRollupsColumns[3]}, + }, + }, + } // ChannelMonitorHistoriesColumns holds the columns for the "channel_monitor_histories" table. ChannelMonitorHistoriesColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "deleted_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, {Name: "model", Type: field.TypeString, Size: 200}, {Name: "status", Type: field.TypeEnum, Enums: []string{"operational", "degraded", "failed", "error"}}, {Name: "latency_ms", Type: field.TypeInt, Nullable: true}, @@ -480,7 +526,7 @@ var ( ForeignKeys: []*schema.ForeignKey{ { Symbol: "channel_monitor_histories_channel_monitors_history", - Columns: []*schema.Column{ChannelMonitorHistoriesColumns[7]}, + Columns: []*schema.Column{ChannelMonitorHistoriesColumns[8]}, RefColumns: []*schema.Column{ChannelMonitorsColumns[0]}, OnDelete: schema.Cascade, }, @@ -489,12 +535,12 @@ var ( { Name: "channelmonitorhistory_monitor_id_model_checked_at", Unique: false, - Columns: []*schema.Column{ChannelMonitorHistoriesColumns[7], ChannelMonitorHistoriesColumns[1], 
ChannelMonitorHistoriesColumns[6]}, + Columns: []*schema.Column{ChannelMonitorHistoriesColumns[8], ChannelMonitorHistoriesColumns[2], ChannelMonitorHistoriesColumns[7]}, }, { Name: "channelmonitorhistory_checked_at", Unique: false, - Columns: []*schema.Column{ChannelMonitorHistoriesColumns[6]}, + Columns: []*schema.Column{ChannelMonitorHistoriesColumns[7]}, }, }, } @@ -1598,6 +1644,7 @@ var ( AuthIdentitiesTable, AuthIdentityChannelsTable, ChannelMonitorsTable, + ChannelMonitorDailyRollupsTable, ChannelMonitorHistoriesTable, ErrorPassthroughRulesTable, GroupsTable, @@ -1659,6 +1706,10 @@ func init() { ChannelMonitorsTable.Annotation = &entsql.Annotation{ Table: "channel_monitors", } + ChannelMonitorDailyRollupsTable.ForeignKeys[0].RefTable = ChannelMonitorsTable + ChannelMonitorDailyRollupsTable.Annotation = &entsql.Annotation{ + Table: "channel_monitor_daily_rollups", + } ChannelMonitorHistoriesTable.ForeignKeys[0].RefTable = ChannelMonitorsTable ChannelMonitorHistoriesTable.Annotation = &entsql.Annotation{ Table: "channel_monitor_histories", diff --git a/backend/ent/mutation.go b/backend/ent/mutation.go index 528ace5f..e97456fe 100644 --- a/backend/ent/mutation.go +++ b/backend/ent/mutation.go @@ -20,6 +20,7 @@ import ( "github.com/Wei-Shaw/sub2api/ent/authidentity" "github.com/Wei-Shaw/sub2api/ent/authidentitychannel" "github.com/Wei-Shaw/sub2api/ent/channelmonitor" + "github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup" "github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory" "github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule" "github.com/Wei-Shaw/sub2api/ent/group" @@ -57,38 +58,39 @@ const ( OpUpdateOne = ent.OpUpdateOne // Node types. 
- TypeAPIKey = "APIKey" - TypeAccount = "Account" - TypeAccountGroup = "AccountGroup" - TypeAnnouncement = "Announcement" - TypeAnnouncementRead = "AnnouncementRead" - TypeAuthIdentity = "AuthIdentity" - TypeAuthIdentityChannel = "AuthIdentityChannel" - TypeChannelMonitor = "ChannelMonitor" - TypeChannelMonitorHistory = "ChannelMonitorHistory" - TypeErrorPassthroughRule = "ErrorPassthroughRule" - TypeGroup = "Group" - TypeIdempotencyRecord = "IdempotencyRecord" - TypeIdentityAdoptionDecision = "IdentityAdoptionDecision" - TypePaymentAuditLog = "PaymentAuditLog" - TypePaymentOrder = "PaymentOrder" - TypePaymentProviderInstance = "PaymentProviderInstance" - TypePendingAuthSession = "PendingAuthSession" - TypePromoCode = "PromoCode" - TypePromoCodeUsage = "PromoCodeUsage" - TypeProxy = "Proxy" - TypeRedeemCode = "RedeemCode" - TypeSecuritySecret = "SecuritySecret" - TypeSetting = "Setting" - TypeSubscriptionPlan = "SubscriptionPlan" - TypeTLSFingerprintProfile = "TLSFingerprintProfile" - TypeUsageCleanupTask = "UsageCleanupTask" - TypeUsageLog = "UsageLog" - TypeUser = "User" - TypeUserAllowedGroup = "UserAllowedGroup" - TypeUserAttributeDefinition = "UserAttributeDefinition" - TypeUserAttributeValue = "UserAttributeValue" - TypeUserSubscription = "UserSubscription" + TypeAPIKey = "APIKey" + TypeAccount = "Account" + TypeAccountGroup = "AccountGroup" + TypeAnnouncement = "Announcement" + TypeAnnouncementRead = "AnnouncementRead" + TypeAuthIdentity = "AuthIdentity" + TypeAuthIdentityChannel = "AuthIdentityChannel" + TypeChannelMonitor = "ChannelMonitor" + TypeChannelMonitorDailyRollup = "ChannelMonitorDailyRollup" + TypeChannelMonitorHistory = "ChannelMonitorHistory" + TypeErrorPassthroughRule = "ErrorPassthroughRule" + TypeGroup = "Group" + TypeIdempotencyRecord = "IdempotencyRecord" + TypeIdentityAdoptionDecision = "IdentityAdoptionDecision" + TypePaymentAuditLog = "PaymentAuditLog" + TypePaymentOrder = "PaymentOrder" + TypePaymentProviderInstance = 
"PaymentProviderInstance" + TypePendingAuthSession = "PendingAuthSession" + TypePromoCode = "PromoCode" + TypePromoCodeUsage = "PromoCodeUsage" + TypeProxy = "Proxy" + TypeRedeemCode = "RedeemCode" + TypeSecuritySecret = "SecuritySecret" + TypeSetting = "Setting" + TypeSubscriptionPlan = "SubscriptionPlan" + TypeTLSFingerprintProfile = "TLSFingerprintProfile" + TypeUsageCleanupTask = "UsageCleanupTask" + TypeUsageLog = "UsageLog" + TypeUser = "User" + TypeUserAllowedGroup = "UserAllowedGroup" + TypeUserAttributeDefinition = "UserAttributeDefinition" + TypeUserAttributeValue = "UserAttributeValue" + TypeUserSubscription = "UserSubscription" ) // APIKeyMutation represents an operation that mutates the APIKey nodes in the graph. @@ -8741,32 +8743,35 @@ func (m *AuthIdentityChannelMutation) ResetEdge(name string) error { // ChannelMonitorMutation represents an operation that mutates the ChannelMonitor nodes in the graph. type ChannelMonitorMutation struct { config - op Op - typ string - id *int64 - created_at *time.Time - updated_at *time.Time - name *string - provider *channelmonitor.Provider - endpoint *string - api_key_encrypted *string - primary_model *string - extra_models *[]string - appendextra_models []string - group_name *string - enabled *bool - interval_seconds *int - addinterval_seconds *int - last_checked_at *time.Time - created_by *int64 - addcreated_by *int64 - clearedFields map[string]struct{} - history map[int64]struct{} - removedhistory map[int64]struct{} - clearedhistory bool - done bool - oldValue func(context.Context) (*ChannelMonitor, error) - predicates []predicate.ChannelMonitor + op Op + typ string + id *int64 + created_at *time.Time + updated_at *time.Time + name *string + provider *channelmonitor.Provider + endpoint *string + api_key_encrypted *string + primary_model *string + extra_models *[]string + appendextra_models []string + group_name *string + enabled *bool + interval_seconds *int + addinterval_seconds *int + last_checked_at 
*time.Time + created_by *int64 + addcreated_by *int64 + clearedFields map[string]struct{} + history map[int64]struct{} + removedhistory map[int64]struct{} + clearedhistory bool + daily_rollups map[int64]struct{} + removeddaily_rollups map[int64]struct{} + cleareddaily_rollups bool + done bool + oldValue func(context.Context) (*ChannelMonitor, error) + predicates []predicate.ChannelMonitor } var _ ent.Mutation = (*ChannelMonitorMutation)(nil) @@ -9470,6 +9475,60 @@ func (m *ChannelMonitorMutation) ResetHistory() { m.removedhistory = nil } +// AddDailyRollupIDs adds the "daily_rollups" edge to the ChannelMonitorDailyRollup entity by ids. +func (m *ChannelMonitorMutation) AddDailyRollupIDs(ids ...int64) { + if m.daily_rollups == nil { + m.daily_rollups = make(map[int64]struct{}) + } + for i := range ids { + m.daily_rollups[ids[i]] = struct{}{} + } +} + +// ClearDailyRollups clears the "daily_rollups" edge to the ChannelMonitorDailyRollup entity. +func (m *ChannelMonitorMutation) ClearDailyRollups() { + m.cleareddaily_rollups = true +} + +// DailyRollupsCleared reports if the "daily_rollups" edge to the ChannelMonitorDailyRollup entity was cleared. +func (m *ChannelMonitorMutation) DailyRollupsCleared() bool { + return m.cleareddaily_rollups +} + +// RemoveDailyRollupIDs removes the "daily_rollups" edge to the ChannelMonitorDailyRollup entity by IDs. +func (m *ChannelMonitorMutation) RemoveDailyRollupIDs(ids ...int64) { + if m.removeddaily_rollups == nil { + m.removeddaily_rollups = make(map[int64]struct{}) + } + for i := range ids { + delete(m.daily_rollups, ids[i]) + m.removeddaily_rollups[ids[i]] = struct{}{} + } +} + +// RemovedDailyRollups returns the removed IDs of the "daily_rollups" edge to the ChannelMonitorDailyRollup entity. +func (m *ChannelMonitorMutation) RemovedDailyRollupsIDs() (ids []int64) { + for id := range m.removeddaily_rollups { + ids = append(ids, id) + } + return +} + +// DailyRollupsIDs returns the "daily_rollups" edge IDs in the mutation. 
+func (m *ChannelMonitorMutation) DailyRollupsIDs() (ids []int64) { + for id := range m.daily_rollups { + ids = append(ids, id) + } + return +} + +// ResetDailyRollups resets all changes to the "daily_rollups" edge. +func (m *ChannelMonitorMutation) ResetDailyRollups() { + m.daily_rollups = nil + m.cleareddaily_rollups = false + m.removeddaily_rollups = nil +} + // Where appends a list predicates to the ChannelMonitorMutation builder. func (m *ChannelMonitorMutation) Where(ps ...predicate.ChannelMonitor) { m.predicates = append(m.predicates, ps...) @@ -9849,10 +9908,13 @@ func (m *ChannelMonitorMutation) ResetField(name string) error { // AddedEdges returns all edge names that were set/added in this mutation. func (m *ChannelMonitorMutation) AddedEdges() []string { - edges := make([]string, 0, 1) + edges := make([]string, 0, 2) if m.history != nil { edges = append(edges, channelmonitor.EdgeHistory) } + if m.daily_rollups != nil { + edges = append(edges, channelmonitor.EdgeDailyRollups) + } return edges } @@ -9866,16 +9928,25 @@ func (m *ChannelMonitorMutation) AddedIDs(name string) []ent.Value { ids = append(ids, id) } return ids + case channelmonitor.EdgeDailyRollups: + ids := make([]ent.Value, 0, len(m.daily_rollups)) + for id := range m.daily_rollups { + ids = append(ids, id) + } + return ids } return nil } // RemovedEdges returns all edge names that were removed in this mutation. 
func (m *ChannelMonitorMutation) RemovedEdges() []string { - edges := make([]string, 0, 1) + edges := make([]string, 0, 2) if m.removedhistory != nil { edges = append(edges, channelmonitor.EdgeHistory) } + if m.removeddaily_rollups != nil { + edges = append(edges, channelmonitor.EdgeDailyRollups) + } return edges } @@ -9889,16 +9960,25 @@ func (m *ChannelMonitorMutation) RemovedIDs(name string) []ent.Value { ids = append(ids, id) } return ids + case channelmonitor.EdgeDailyRollups: + ids := make([]ent.Value, 0, len(m.removeddaily_rollups)) + for id := range m.removeddaily_rollups { + ids = append(ids, id) + } + return ids } return nil } // ClearedEdges returns all edge names that were cleared in this mutation. func (m *ChannelMonitorMutation) ClearedEdges() []string { - edges := make([]string, 0, 1) + edges := make([]string, 0, 2) if m.clearedhistory { edges = append(edges, channelmonitor.EdgeHistory) } + if m.cleareddaily_rollups { + edges = append(edges, channelmonitor.EdgeDailyRollups) + } return edges } @@ -9908,6 +9988,8 @@ func (m *ChannelMonitorMutation) EdgeCleared(name string) bool { switch name { case channelmonitor.EdgeHistory: return m.clearedhistory + case channelmonitor.EdgeDailyRollups: + return m.cleareddaily_rollups } return false } @@ -9927,16 +10009,1511 @@ func (m *ChannelMonitorMutation) ResetEdge(name string) error { case channelmonitor.EdgeHistory: m.ResetHistory() return nil + case channelmonitor.EdgeDailyRollups: + m.ResetDailyRollups() + return nil } return fmt.Errorf("unknown ChannelMonitor edge %s", name) } +// ChannelMonitorDailyRollupMutation represents an operation that mutates the ChannelMonitorDailyRollup nodes in the graph. 
+type ChannelMonitorDailyRollupMutation struct { + config + op Op + typ string + id *int64 + deleted_at *time.Time + model *string + bucket_date *time.Time + total_checks *int + addtotal_checks *int + ok_count *int + addok_count *int + operational_count *int + addoperational_count *int + degraded_count *int + adddegraded_count *int + failed_count *int + addfailed_count *int + error_count *int + adderror_count *int + sum_latency_ms *int64 + addsum_latency_ms *int64 + count_latency *int + addcount_latency *int + sum_ping_latency_ms *int64 + addsum_ping_latency_ms *int64 + count_ping_latency *int + addcount_ping_latency *int + computed_at *time.Time + clearedFields map[string]struct{} + monitor *int64 + clearedmonitor bool + done bool + oldValue func(context.Context) (*ChannelMonitorDailyRollup, error) + predicates []predicate.ChannelMonitorDailyRollup +} + +var _ ent.Mutation = (*ChannelMonitorDailyRollupMutation)(nil) + +// channelmonitordailyrollupOption allows management of the mutation configuration using functional options. +type channelmonitordailyrollupOption func(*ChannelMonitorDailyRollupMutation) + +// newChannelMonitorDailyRollupMutation creates new mutation for the ChannelMonitorDailyRollup entity. +func newChannelMonitorDailyRollupMutation(c config, op Op, opts ...channelmonitordailyrollupOption) *ChannelMonitorDailyRollupMutation { + m := &ChannelMonitorDailyRollupMutation{ + config: c, + op: op, + typ: TypeChannelMonitorDailyRollup, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withChannelMonitorDailyRollupID sets the ID field of the mutation. 
+func withChannelMonitorDailyRollupID(id int64) channelmonitordailyrollupOption { + return func(m *ChannelMonitorDailyRollupMutation) { + var ( + err error + once sync.Once + value *ChannelMonitorDailyRollup + ) + m.oldValue = func(ctx context.Context) (*ChannelMonitorDailyRollup, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().ChannelMonitorDailyRollup.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withChannelMonitorDailyRollup sets the old ChannelMonitorDailyRollup of the mutation. +func withChannelMonitorDailyRollup(node *ChannelMonitorDailyRollup) channelmonitordailyrollupOption { + return func(m *ChannelMonitorDailyRollupMutation) { + m.oldValue = func(context.Context) (*ChannelMonitorDailyRollup, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m ChannelMonitorDailyRollupMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m ChannelMonitorDailyRollupMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *ChannelMonitorDailyRollupMutation) ID() (id int64, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. 
+// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *ChannelMonitorDailyRollupMutation) IDs(ctx context.Context) ([]int64, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int64{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().ChannelMonitorDailyRollup.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetDeletedAt sets the "deleted_at" field. +func (m *ChannelMonitorDailyRollupMutation) SetDeletedAt(t time.Time) { + m.deleted_at = &t +} + +// DeletedAt returns the value of the "deleted_at" field in the mutation. +func (m *ChannelMonitorDailyRollupMutation) DeletedAt() (r time.Time, exists bool) { + v := m.deleted_at + if v == nil { + return + } + return *v, true +} + +// OldDeletedAt returns the old "deleted_at" field's value of the ChannelMonitorDailyRollup entity. +// If the ChannelMonitorDailyRollup object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ChannelMonitorDailyRollupMutation) OldDeletedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDeletedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDeletedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDeletedAt: %w", err) + } + return oldValue.DeletedAt, nil +} + +// ClearDeletedAt clears the value of the "deleted_at" field. 
+func (m *ChannelMonitorDailyRollupMutation) ClearDeletedAt() { + m.deleted_at = nil + m.clearedFields[channelmonitordailyrollup.FieldDeletedAt] = struct{}{} +} + +// DeletedAtCleared returns if the "deleted_at" field was cleared in this mutation. +func (m *ChannelMonitorDailyRollupMutation) DeletedAtCleared() bool { + _, ok := m.clearedFields[channelmonitordailyrollup.FieldDeletedAt] + return ok +} + +// ResetDeletedAt resets all changes to the "deleted_at" field. +func (m *ChannelMonitorDailyRollupMutation) ResetDeletedAt() { + m.deleted_at = nil + delete(m.clearedFields, channelmonitordailyrollup.FieldDeletedAt) +} + +// SetMonitorID sets the "monitor_id" field. +func (m *ChannelMonitorDailyRollupMutation) SetMonitorID(i int64) { + m.monitor = &i +} + +// MonitorID returns the value of the "monitor_id" field in the mutation. +func (m *ChannelMonitorDailyRollupMutation) MonitorID() (r int64, exists bool) { + v := m.monitor + if v == nil { + return + } + return *v, true +} + +// OldMonitorID returns the old "monitor_id" field's value of the ChannelMonitorDailyRollup entity. +// If the ChannelMonitorDailyRollup object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ChannelMonitorDailyRollupMutation) OldMonitorID(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldMonitorID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldMonitorID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldMonitorID: %w", err) + } + return oldValue.MonitorID, nil +} + +// ResetMonitorID resets all changes to the "monitor_id" field. +func (m *ChannelMonitorDailyRollupMutation) ResetMonitorID() { + m.monitor = nil +} + +// SetModel sets the "model" field. 
+func (m *ChannelMonitorDailyRollupMutation) SetModel(s string) { + m.model = &s +} + +// Model returns the value of the "model" field in the mutation. +func (m *ChannelMonitorDailyRollupMutation) Model() (r string, exists bool) { + v := m.model + if v == nil { + return + } + return *v, true +} + +// OldModel returns the old "model" field's value of the ChannelMonitorDailyRollup entity. +// If the ChannelMonitorDailyRollup object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ChannelMonitorDailyRollupMutation) OldModel(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldModel is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldModel requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldModel: %w", err) + } + return oldValue.Model, nil +} + +// ResetModel resets all changes to the "model" field. +func (m *ChannelMonitorDailyRollupMutation) ResetModel() { + m.model = nil +} + +// SetBucketDate sets the "bucket_date" field. +func (m *ChannelMonitorDailyRollupMutation) SetBucketDate(t time.Time) { + m.bucket_date = &t +} + +// BucketDate returns the value of the "bucket_date" field in the mutation. +func (m *ChannelMonitorDailyRollupMutation) BucketDate() (r time.Time, exists bool) { + v := m.bucket_date + if v == nil { + return + } + return *v, true +} + +// OldBucketDate returns the old "bucket_date" field's value of the ChannelMonitorDailyRollup entity. +// If the ChannelMonitorDailyRollup object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *ChannelMonitorDailyRollupMutation) OldBucketDate(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldBucketDate is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldBucketDate requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldBucketDate: %w", err) + } + return oldValue.BucketDate, nil +} + +// ResetBucketDate resets all changes to the "bucket_date" field. +func (m *ChannelMonitorDailyRollupMutation) ResetBucketDate() { + m.bucket_date = nil +} + +// SetTotalChecks sets the "total_checks" field. +func (m *ChannelMonitorDailyRollupMutation) SetTotalChecks(i int) { + m.total_checks = &i + m.addtotal_checks = nil +} + +// TotalChecks returns the value of the "total_checks" field in the mutation. +func (m *ChannelMonitorDailyRollupMutation) TotalChecks() (r int, exists bool) { + v := m.total_checks + if v == nil { + return + } + return *v, true +} + +// OldTotalChecks returns the old "total_checks" field's value of the ChannelMonitorDailyRollup entity. +// If the ChannelMonitorDailyRollup object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ChannelMonitorDailyRollupMutation) OldTotalChecks(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldTotalChecks is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldTotalChecks requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldTotalChecks: %w", err) + } + return oldValue.TotalChecks, nil +} + +// AddTotalChecks adds i to the "total_checks" field. 
+func (m *ChannelMonitorDailyRollupMutation) AddTotalChecks(i int) { + if m.addtotal_checks != nil { + *m.addtotal_checks += i + } else { + m.addtotal_checks = &i + } +} + +// AddedTotalChecks returns the value that was added to the "total_checks" field in this mutation. +func (m *ChannelMonitorDailyRollupMutation) AddedTotalChecks() (r int, exists bool) { + v := m.addtotal_checks + if v == nil { + return + } + return *v, true +} + +// ResetTotalChecks resets all changes to the "total_checks" field. +func (m *ChannelMonitorDailyRollupMutation) ResetTotalChecks() { + m.total_checks = nil + m.addtotal_checks = nil +} + +// SetOkCount sets the "ok_count" field. +func (m *ChannelMonitorDailyRollupMutation) SetOkCount(i int) { + m.ok_count = &i + m.addok_count = nil +} + +// OkCount returns the value of the "ok_count" field in the mutation. +func (m *ChannelMonitorDailyRollupMutation) OkCount() (r int, exists bool) { + v := m.ok_count + if v == nil { + return + } + return *v, true +} + +// OldOkCount returns the old "ok_count" field's value of the ChannelMonitorDailyRollup entity. +// If the ChannelMonitorDailyRollup object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ChannelMonitorDailyRollupMutation) OldOkCount(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldOkCount is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldOkCount requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldOkCount: %w", err) + } + return oldValue.OkCount, nil +} + +// AddOkCount adds i to the "ok_count" field. 
+func (m *ChannelMonitorDailyRollupMutation) AddOkCount(i int) { + if m.addok_count != nil { + *m.addok_count += i + } else { + m.addok_count = &i + } +} + +// AddedOkCount returns the value that was added to the "ok_count" field in this mutation. +func (m *ChannelMonitorDailyRollupMutation) AddedOkCount() (r int, exists bool) { + v := m.addok_count + if v == nil { + return + } + return *v, true +} + +// ResetOkCount resets all changes to the "ok_count" field. +func (m *ChannelMonitorDailyRollupMutation) ResetOkCount() { + m.ok_count = nil + m.addok_count = nil +} + +// SetOperationalCount sets the "operational_count" field. +func (m *ChannelMonitorDailyRollupMutation) SetOperationalCount(i int) { + m.operational_count = &i + m.addoperational_count = nil +} + +// OperationalCount returns the value of the "operational_count" field in the mutation. +func (m *ChannelMonitorDailyRollupMutation) OperationalCount() (r int, exists bool) { + v := m.operational_count + if v == nil { + return + } + return *v, true +} + +// OldOperationalCount returns the old "operational_count" field's value of the ChannelMonitorDailyRollup entity. +// If the ChannelMonitorDailyRollup object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ChannelMonitorDailyRollupMutation) OldOperationalCount(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldOperationalCount is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldOperationalCount requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldOperationalCount: %w", err) + } + return oldValue.OperationalCount, nil +} + +// AddOperationalCount adds i to the "operational_count" field. 
+func (m *ChannelMonitorDailyRollupMutation) AddOperationalCount(i int) { + if m.addoperational_count != nil { + *m.addoperational_count += i + } else { + m.addoperational_count = &i + } +} + +// AddedOperationalCount returns the value that was added to the "operational_count" field in this mutation. +func (m *ChannelMonitorDailyRollupMutation) AddedOperationalCount() (r int, exists bool) { + v := m.addoperational_count + if v == nil { + return + } + return *v, true +} + +// ResetOperationalCount resets all changes to the "operational_count" field. +func (m *ChannelMonitorDailyRollupMutation) ResetOperationalCount() { + m.operational_count = nil + m.addoperational_count = nil +} + +// SetDegradedCount sets the "degraded_count" field. +func (m *ChannelMonitorDailyRollupMutation) SetDegradedCount(i int) { + m.degraded_count = &i + m.adddegraded_count = nil +} + +// DegradedCount returns the value of the "degraded_count" field in the mutation. +func (m *ChannelMonitorDailyRollupMutation) DegradedCount() (r int, exists bool) { + v := m.degraded_count + if v == nil { + return + } + return *v, true +} + +// OldDegradedCount returns the old "degraded_count" field's value of the ChannelMonitorDailyRollup entity. +// If the ChannelMonitorDailyRollup object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *ChannelMonitorDailyRollupMutation) OldDegradedCount(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDegradedCount is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDegradedCount requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDegradedCount: %w", err) + } + return oldValue.DegradedCount, nil +} + +// AddDegradedCount adds i to the "degraded_count" field. +func (m *ChannelMonitorDailyRollupMutation) AddDegradedCount(i int) { + if m.adddegraded_count != nil { + *m.adddegraded_count += i + } else { + m.adddegraded_count = &i + } +} + +// AddedDegradedCount returns the value that was added to the "degraded_count" field in this mutation. +func (m *ChannelMonitorDailyRollupMutation) AddedDegradedCount() (r int, exists bool) { + v := m.adddegraded_count + if v == nil { + return + } + return *v, true +} + +// ResetDegradedCount resets all changes to the "degraded_count" field. +func (m *ChannelMonitorDailyRollupMutation) ResetDegradedCount() { + m.degraded_count = nil + m.adddegraded_count = nil +} + +// SetFailedCount sets the "failed_count" field. +func (m *ChannelMonitorDailyRollupMutation) SetFailedCount(i int) { + m.failed_count = &i + m.addfailed_count = nil +} + +// FailedCount returns the value of the "failed_count" field in the mutation. +func (m *ChannelMonitorDailyRollupMutation) FailedCount() (r int, exists bool) { + v := m.failed_count + if v == nil { + return + } + return *v, true +} + +// OldFailedCount returns the old "failed_count" field's value of the ChannelMonitorDailyRollup entity. +// If the ChannelMonitorDailyRollup object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *ChannelMonitorDailyRollupMutation) OldFailedCount(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldFailedCount is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldFailedCount requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldFailedCount: %w", err) + } + return oldValue.FailedCount, nil +} + +// AddFailedCount adds i to the "failed_count" field. +func (m *ChannelMonitorDailyRollupMutation) AddFailedCount(i int) { + if m.addfailed_count != nil { + *m.addfailed_count += i + } else { + m.addfailed_count = &i + } +} + +// AddedFailedCount returns the value that was added to the "failed_count" field in this mutation. +func (m *ChannelMonitorDailyRollupMutation) AddedFailedCount() (r int, exists bool) { + v := m.addfailed_count + if v == nil { + return + } + return *v, true +} + +// ResetFailedCount resets all changes to the "failed_count" field. +func (m *ChannelMonitorDailyRollupMutation) ResetFailedCount() { + m.failed_count = nil + m.addfailed_count = nil +} + +// SetErrorCount sets the "error_count" field. +func (m *ChannelMonitorDailyRollupMutation) SetErrorCount(i int) { + m.error_count = &i + m.adderror_count = nil +} + +// ErrorCount returns the value of the "error_count" field in the mutation. +func (m *ChannelMonitorDailyRollupMutation) ErrorCount() (r int, exists bool) { + v := m.error_count + if v == nil { + return + } + return *v, true +} + +// OldErrorCount returns the old "error_count" field's value of the ChannelMonitorDailyRollup entity. +// If the ChannelMonitorDailyRollup object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *ChannelMonitorDailyRollupMutation) OldErrorCount(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldErrorCount is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldErrorCount requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldErrorCount: %w", err) + } + return oldValue.ErrorCount, nil +} + +// AddErrorCount adds i to the "error_count" field. +func (m *ChannelMonitorDailyRollupMutation) AddErrorCount(i int) { + if m.adderror_count != nil { + *m.adderror_count += i + } else { + m.adderror_count = &i + } +} + +// AddedErrorCount returns the value that was added to the "error_count" field in this mutation. +func (m *ChannelMonitorDailyRollupMutation) AddedErrorCount() (r int, exists bool) { + v := m.adderror_count + if v == nil { + return + } + return *v, true +} + +// ResetErrorCount resets all changes to the "error_count" field. +func (m *ChannelMonitorDailyRollupMutation) ResetErrorCount() { + m.error_count = nil + m.adderror_count = nil +} + +// SetSumLatencyMs sets the "sum_latency_ms" field. +func (m *ChannelMonitorDailyRollupMutation) SetSumLatencyMs(i int64) { + m.sum_latency_ms = &i + m.addsum_latency_ms = nil +} + +// SumLatencyMs returns the value of the "sum_latency_ms" field in the mutation. +func (m *ChannelMonitorDailyRollupMutation) SumLatencyMs() (r int64, exists bool) { + v := m.sum_latency_ms + if v == nil { + return + } + return *v, true +} + +// OldSumLatencyMs returns the old "sum_latency_ms" field's value of the ChannelMonitorDailyRollup entity. +// If the ChannelMonitorDailyRollup object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *ChannelMonitorDailyRollupMutation) OldSumLatencyMs(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSumLatencyMs is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSumLatencyMs requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSumLatencyMs: %w", err) + } + return oldValue.SumLatencyMs, nil +} + +// AddSumLatencyMs adds i to the "sum_latency_ms" field. +func (m *ChannelMonitorDailyRollupMutation) AddSumLatencyMs(i int64) { + if m.addsum_latency_ms != nil { + *m.addsum_latency_ms += i + } else { + m.addsum_latency_ms = &i + } +} + +// AddedSumLatencyMs returns the value that was added to the "sum_latency_ms" field in this mutation. +func (m *ChannelMonitorDailyRollupMutation) AddedSumLatencyMs() (r int64, exists bool) { + v := m.addsum_latency_ms + if v == nil { + return + } + return *v, true +} + +// ResetSumLatencyMs resets all changes to the "sum_latency_ms" field. +func (m *ChannelMonitorDailyRollupMutation) ResetSumLatencyMs() { + m.sum_latency_ms = nil + m.addsum_latency_ms = nil +} + +// SetCountLatency sets the "count_latency" field. +func (m *ChannelMonitorDailyRollupMutation) SetCountLatency(i int) { + m.count_latency = &i + m.addcount_latency = nil +} + +// CountLatency returns the value of the "count_latency" field in the mutation. +func (m *ChannelMonitorDailyRollupMutation) CountLatency() (r int, exists bool) { + v := m.count_latency + if v == nil { + return + } + return *v, true +} + +// OldCountLatency returns the old "count_latency" field's value of the ChannelMonitorDailyRollup entity. +// If the ChannelMonitorDailyRollup object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *ChannelMonitorDailyRollupMutation) OldCountLatency(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCountLatency is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCountLatency requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCountLatency: %w", err) + } + return oldValue.CountLatency, nil +} + +// AddCountLatency adds i to the "count_latency" field. +func (m *ChannelMonitorDailyRollupMutation) AddCountLatency(i int) { + if m.addcount_latency != nil { + *m.addcount_latency += i + } else { + m.addcount_latency = &i + } +} + +// AddedCountLatency returns the value that was added to the "count_latency" field in this mutation. +func (m *ChannelMonitorDailyRollupMutation) AddedCountLatency() (r int, exists bool) { + v := m.addcount_latency + if v == nil { + return + } + return *v, true +} + +// ResetCountLatency resets all changes to the "count_latency" field. +func (m *ChannelMonitorDailyRollupMutation) ResetCountLatency() { + m.count_latency = nil + m.addcount_latency = nil +} + +// SetSumPingLatencyMs sets the "sum_ping_latency_ms" field. +func (m *ChannelMonitorDailyRollupMutation) SetSumPingLatencyMs(i int64) { + m.sum_ping_latency_ms = &i + m.addsum_ping_latency_ms = nil +} + +// SumPingLatencyMs returns the value of the "sum_ping_latency_ms" field in the mutation. +func (m *ChannelMonitorDailyRollupMutation) SumPingLatencyMs() (r int64, exists bool) { + v := m.sum_ping_latency_ms + if v == nil { + return + } + return *v, true +} + +// OldSumPingLatencyMs returns the old "sum_ping_latency_ms" field's value of the ChannelMonitorDailyRollup entity. +// If the ChannelMonitorDailyRollup object wasn't provided to the builder, the object is fetched from the database. 
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ChannelMonitorDailyRollupMutation) OldSumPingLatencyMs(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSumPingLatencyMs is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSumPingLatencyMs requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSumPingLatencyMs: %w", err) + } + return oldValue.SumPingLatencyMs, nil +} + +// AddSumPingLatencyMs adds i to the "sum_ping_latency_ms" field. +func (m *ChannelMonitorDailyRollupMutation) AddSumPingLatencyMs(i int64) { + if m.addsum_ping_latency_ms != nil { + *m.addsum_ping_latency_ms += i + } else { + m.addsum_ping_latency_ms = &i + } +} + +// AddedSumPingLatencyMs returns the value that was added to the "sum_ping_latency_ms" field in this mutation. +func (m *ChannelMonitorDailyRollupMutation) AddedSumPingLatencyMs() (r int64, exists bool) { + v := m.addsum_ping_latency_ms + if v == nil { + return + } + return *v, true +} + +// ResetSumPingLatencyMs resets all changes to the "sum_ping_latency_ms" field. +func (m *ChannelMonitorDailyRollupMutation) ResetSumPingLatencyMs() { + m.sum_ping_latency_ms = nil + m.addsum_ping_latency_ms = nil +} + +// SetCountPingLatency sets the "count_ping_latency" field. +func (m *ChannelMonitorDailyRollupMutation) SetCountPingLatency(i int) { + m.count_ping_latency = &i + m.addcount_ping_latency = nil +} + +// CountPingLatency returns the value of the "count_ping_latency" field in the mutation. +func (m *ChannelMonitorDailyRollupMutation) CountPingLatency() (r int, exists bool) { + v := m.count_ping_latency + if v == nil { + return + } + return *v, true +} + +// OldCountPingLatency returns the old "count_ping_latency" field's value of the ChannelMonitorDailyRollup entity. 
+// If the ChannelMonitorDailyRollup object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ChannelMonitorDailyRollupMutation) OldCountPingLatency(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCountPingLatency is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCountPingLatency requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCountPingLatency: %w", err) + } + return oldValue.CountPingLatency, nil +} + +// AddCountPingLatency adds i to the "count_ping_latency" field. +func (m *ChannelMonitorDailyRollupMutation) AddCountPingLatency(i int) { + if m.addcount_ping_latency != nil { + *m.addcount_ping_latency += i + } else { + m.addcount_ping_latency = &i + } +} + +// AddedCountPingLatency returns the value that was added to the "count_ping_latency" field in this mutation. +func (m *ChannelMonitorDailyRollupMutation) AddedCountPingLatency() (r int, exists bool) { + v := m.addcount_ping_latency + if v == nil { + return + } + return *v, true +} + +// ResetCountPingLatency resets all changes to the "count_ping_latency" field. +func (m *ChannelMonitorDailyRollupMutation) ResetCountPingLatency() { + m.count_ping_latency = nil + m.addcount_ping_latency = nil +} + +// SetComputedAt sets the "computed_at" field. +func (m *ChannelMonitorDailyRollupMutation) SetComputedAt(t time.Time) { + m.computed_at = &t +} + +// ComputedAt returns the value of the "computed_at" field in the mutation. +func (m *ChannelMonitorDailyRollupMutation) ComputedAt() (r time.Time, exists bool) { + v := m.computed_at + if v == nil { + return + } + return *v, true +} + +// OldComputedAt returns the old "computed_at" field's value of the ChannelMonitorDailyRollup entity. 
+// If the ChannelMonitorDailyRollup object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ChannelMonitorDailyRollupMutation) OldComputedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldComputedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldComputedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldComputedAt: %w", err) + } + return oldValue.ComputedAt, nil +} + +// ResetComputedAt resets all changes to the "computed_at" field. +func (m *ChannelMonitorDailyRollupMutation) ResetComputedAt() { + m.computed_at = nil +} + +// ClearMonitor clears the "monitor" edge to the ChannelMonitor entity. +func (m *ChannelMonitorDailyRollupMutation) ClearMonitor() { + m.clearedmonitor = true + m.clearedFields[channelmonitordailyrollup.FieldMonitorID] = struct{}{} +} + +// MonitorCleared reports if the "monitor" edge to the ChannelMonitor entity was cleared. +func (m *ChannelMonitorDailyRollupMutation) MonitorCleared() bool { + return m.clearedmonitor +} + +// MonitorIDs returns the "monitor" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// MonitorID instead. It exists only for internal usage by the builders. +func (m *ChannelMonitorDailyRollupMutation) MonitorIDs() (ids []int64) { + if id := m.monitor; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetMonitor resets all changes to the "monitor" edge. +func (m *ChannelMonitorDailyRollupMutation) ResetMonitor() { + m.monitor = nil + m.clearedmonitor = false +} + +// Where appends a list predicates to the ChannelMonitorDailyRollupMutation builder. 
+func (m *ChannelMonitorDailyRollupMutation) Where(ps ...predicate.ChannelMonitorDailyRollup) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the ChannelMonitorDailyRollupMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *ChannelMonitorDailyRollupMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.ChannelMonitorDailyRollup, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *ChannelMonitorDailyRollupMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *ChannelMonitorDailyRollupMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (ChannelMonitorDailyRollup). +func (m *ChannelMonitorDailyRollupMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). 
+func (m *ChannelMonitorDailyRollupMutation) Fields() []string { + fields := make([]string, 0, 15) + if m.deleted_at != nil { + fields = append(fields, channelmonitordailyrollup.FieldDeletedAt) + } + if m.monitor != nil { + fields = append(fields, channelmonitordailyrollup.FieldMonitorID) + } + if m.model != nil { + fields = append(fields, channelmonitordailyrollup.FieldModel) + } + if m.bucket_date != nil { + fields = append(fields, channelmonitordailyrollup.FieldBucketDate) + } + if m.total_checks != nil { + fields = append(fields, channelmonitordailyrollup.FieldTotalChecks) + } + if m.ok_count != nil { + fields = append(fields, channelmonitordailyrollup.FieldOkCount) + } + if m.operational_count != nil { + fields = append(fields, channelmonitordailyrollup.FieldOperationalCount) + } + if m.degraded_count != nil { + fields = append(fields, channelmonitordailyrollup.FieldDegradedCount) + } + if m.failed_count != nil { + fields = append(fields, channelmonitordailyrollup.FieldFailedCount) + } + if m.error_count != nil { + fields = append(fields, channelmonitordailyrollup.FieldErrorCount) + } + if m.sum_latency_ms != nil { + fields = append(fields, channelmonitordailyrollup.FieldSumLatencyMs) + } + if m.count_latency != nil { + fields = append(fields, channelmonitordailyrollup.FieldCountLatency) + } + if m.sum_ping_latency_ms != nil { + fields = append(fields, channelmonitordailyrollup.FieldSumPingLatencyMs) + } + if m.count_ping_latency != nil { + fields = append(fields, channelmonitordailyrollup.FieldCountPingLatency) + } + if m.computed_at != nil { + fields = append(fields, channelmonitordailyrollup.FieldComputedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. 
+func (m *ChannelMonitorDailyRollupMutation) Field(name string) (ent.Value, bool) { + switch name { + case channelmonitordailyrollup.FieldDeletedAt: + return m.DeletedAt() + case channelmonitordailyrollup.FieldMonitorID: + return m.MonitorID() + case channelmonitordailyrollup.FieldModel: + return m.Model() + case channelmonitordailyrollup.FieldBucketDate: + return m.BucketDate() + case channelmonitordailyrollup.FieldTotalChecks: + return m.TotalChecks() + case channelmonitordailyrollup.FieldOkCount: + return m.OkCount() + case channelmonitordailyrollup.FieldOperationalCount: + return m.OperationalCount() + case channelmonitordailyrollup.FieldDegradedCount: + return m.DegradedCount() + case channelmonitordailyrollup.FieldFailedCount: + return m.FailedCount() + case channelmonitordailyrollup.FieldErrorCount: + return m.ErrorCount() + case channelmonitordailyrollup.FieldSumLatencyMs: + return m.SumLatencyMs() + case channelmonitordailyrollup.FieldCountLatency: + return m.CountLatency() + case channelmonitordailyrollup.FieldSumPingLatencyMs: + return m.SumPingLatencyMs() + case channelmonitordailyrollup.FieldCountPingLatency: + return m.CountPingLatency() + case channelmonitordailyrollup.FieldComputedAt: + return m.ComputedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. 
+func (m *ChannelMonitorDailyRollupMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case channelmonitordailyrollup.FieldDeletedAt: + return m.OldDeletedAt(ctx) + case channelmonitordailyrollup.FieldMonitorID: + return m.OldMonitorID(ctx) + case channelmonitordailyrollup.FieldModel: + return m.OldModel(ctx) + case channelmonitordailyrollup.FieldBucketDate: + return m.OldBucketDate(ctx) + case channelmonitordailyrollup.FieldTotalChecks: + return m.OldTotalChecks(ctx) + case channelmonitordailyrollup.FieldOkCount: + return m.OldOkCount(ctx) + case channelmonitordailyrollup.FieldOperationalCount: + return m.OldOperationalCount(ctx) + case channelmonitordailyrollup.FieldDegradedCount: + return m.OldDegradedCount(ctx) + case channelmonitordailyrollup.FieldFailedCount: + return m.OldFailedCount(ctx) + case channelmonitordailyrollup.FieldErrorCount: + return m.OldErrorCount(ctx) + case channelmonitordailyrollup.FieldSumLatencyMs: + return m.OldSumLatencyMs(ctx) + case channelmonitordailyrollup.FieldCountLatency: + return m.OldCountLatency(ctx) + case channelmonitordailyrollup.FieldSumPingLatencyMs: + return m.OldSumPingLatencyMs(ctx) + case channelmonitordailyrollup.FieldCountPingLatency: + return m.OldCountPingLatency(ctx) + case channelmonitordailyrollup.FieldComputedAt: + return m.OldComputedAt(ctx) + } + return nil, fmt.Errorf("unknown ChannelMonitorDailyRollup field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *ChannelMonitorDailyRollupMutation) SetField(name string, value ent.Value) error { + switch name { + case channelmonitordailyrollup.FieldDeletedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDeletedAt(v) + return nil + case channelmonitordailyrollup.FieldMonitorID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetMonitorID(v) + return nil + case channelmonitordailyrollup.FieldModel: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetModel(v) + return nil + case channelmonitordailyrollup.FieldBucketDate: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetBucketDate(v) + return nil + case channelmonitordailyrollup.FieldTotalChecks: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetTotalChecks(v) + return nil + case channelmonitordailyrollup.FieldOkCount: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetOkCount(v) + return nil + case channelmonitordailyrollup.FieldOperationalCount: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetOperationalCount(v) + return nil + case channelmonitordailyrollup.FieldDegradedCount: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDegradedCount(v) + return nil + case channelmonitordailyrollup.FieldFailedCount: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetFailedCount(v) + return nil + case channelmonitordailyrollup.FieldErrorCount: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + 
m.SetErrorCount(v) + return nil + case channelmonitordailyrollup.FieldSumLatencyMs: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSumLatencyMs(v) + return nil + case channelmonitordailyrollup.FieldCountLatency: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCountLatency(v) + return nil + case channelmonitordailyrollup.FieldSumPingLatencyMs: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSumPingLatencyMs(v) + return nil + case channelmonitordailyrollup.FieldCountPingLatency: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCountPingLatency(v) + return nil + case channelmonitordailyrollup.FieldComputedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetComputedAt(v) + return nil + } + return fmt.Errorf("unknown ChannelMonitorDailyRollup field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. 
+func (m *ChannelMonitorDailyRollupMutation) AddedFields() []string { + var fields []string + if m.addtotal_checks != nil { + fields = append(fields, channelmonitordailyrollup.FieldTotalChecks) + } + if m.addok_count != nil { + fields = append(fields, channelmonitordailyrollup.FieldOkCount) + } + if m.addoperational_count != nil { + fields = append(fields, channelmonitordailyrollup.FieldOperationalCount) + } + if m.adddegraded_count != nil { + fields = append(fields, channelmonitordailyrollup.FieldDegradedCount) + } + if m.addfailed_count != nil { + fields = append(fields, channelmonitordailyrollup.FieldFailedCount) + } + if m.adderror_count != nil { + fields = append(fields, channelmonitordailyrollup.FieldErrorCount) + } + if m.addsum_latency_ms != nil { + fields = append(fields, channelmonitordailyrollup.FieldSumLatencyMs) + } + if m.addcount_latency != nil { + fields = append(fields, channelmonitordailyrollup.FieldCountLatency) + } + if m.addsum_ping_latency_ms != nil { + fields = append(fields, channelmonitordailyrollup.FieldSumPingLatencyMs) + } + if m.addcount_ping_latency != nil { + fields = append(fields, channelmonitordailyrollup.FieldCountPingLatency) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. 
+func (m *ChannelMonitorDailyRollupMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case channelmonitordailyrollup.FieldTotalChecks: + return m.AddedTotalChecks() + case channelmonitordailyrollup.FieldOkCount: + return m.AddedOkCount() + case channelmonitordailyrollup.FieldOperationalCount: + return m.AddedOperationalCount() + case channelmonitordailyrollup.FieldDegradedCount: + return m.AddedDegradedCount() + case channelmonitordailyrollup.FieldFailedCount: + return m.AddedFailedCount() + case channelmonitordailyrollup.FieldErrorCount: + return m.AddedErrorCount() + case channelmonitordailyrollup.FieldSumLatencyMs: + return m.AddedSumLatencyMs() + case channelmonitordailyrollup.FieldCountLatency: + return m.AddedCountLatency() + case channelmonitordailyrollup.FieldSumPingLatencyMs: + return m.AddedSumPingLatencyMs() + case channelmonitordailyrollup.FieldCountPingLatency: + return m.AddedCountPingLatency() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *ChannelMonitorDailyRollupMutation) AddField(name string, value ent.Value) error { + switch name { + case channelmonitordailyrollup.FieldTotalChecks: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddTotalChecks(v) + return nil + case channelmonitordailyrollup.FieldOkCount: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddOkCount(v) + return nil + case channelmonitordailyrollup.FieldOperationalCount: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddOperationalCount(v) + return nil + case channelmonitordailyrollup.FieldDegradedCount: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddDegradedCount(v) + return nil + case channelmonitordailyrollup.FieldFailedCount: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddFailedCount(v) + return nil + case channelmonitordailyrollup.FieldErrorCount: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddErrorCount(v) + return nil + case channelmonitordailyrollup.FieldSumLatencyMs: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddSumLatencyMs(v) + return nil + case channelmonitordailyrollup.FieldCountLatency: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddCountLatency(v) + return nil + case channelmonitordailyrollup.FieldSumPingLatencyMs: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddSumPingLatencyMs(v) + return nil + case channelmonitordailyrollup.FieldCountPingLatency: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field 
%s", value, name) + } + m.AddCountPingLatency(v) + return nil + } + return fmt.Errorf("unknown ChannelMonitorDailyRollup numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *ChannelMonitorDailyRollupMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(channelmonitordailyrollup.FieldDeletedAt) { + fields = append(fields, channelmonitordailyrollup.FieldDeletedAt) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *ChannelMonitorDailyRollupMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *ChannelMonitorDailyRollupMutation) ClearField(name string) error { + switch name { + case channelmonitordailyrollup.FieldDeletedAt: + m.ClearDeletedAt() + return nil + } + return fmt.Errorf("unknown ChannelMonitorDailyRollup nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. 
+func (m *ChannelMonitorDailyRollupMutation) ResetField(name string) error { + switch name { + case channelmonitordailyrollup.FieldDeletedAt: + m.ResetDeletedAt() + return nil + case channelmonitordailyrollup.FieldMonitorID: + m.ResetMonitorID() + return nil + case channelmonitordailyrollup.FieldModel: + m.ResetModel() + return nil + case channelmonitordailyrollup.FieldBucketDate: + m.ResetBucketDate() + return nil + case channelmonitordailyrollup.FieldTotalChecks: + m.ResetTotalChecks() + return nil + case channelmonitordailyrollup.FieldOkCount: + m.ResetOkCount() + return nil + case channelmonitordailyrollup.FieldOperationalCount: + m.ResetOperationalCount() + return nil + case channelmonitordailyrollup.FieldDegradedCount: + m.ResetDegradedCount() + return nil + case channelmonitordailyrollup.FieldFailedCount: + m.ResetFailedCount() + return nil + case channelmonitordailyrollup.FieldErrorCount: + m.ResetErrorCount() + return nil + case channelmonitordailyrollup.FieldSumLatencyMs: + m.ResetSumLatencyMs() + return nil + case channelmonitordailyrollup.FieldCountLatency: + m.ResetCountLatency() + return nil + case channelmonitordailyrollup.FieldSumPingLatencyMs: + m.ResetSumPingLatencyMs() + return nil + case channelmonitordailyrollup.FieldCountPingLatency: + m.ResetCountPingLatency() + return nil + case channelmonitordailyrollup.FieldComputedAt: + m.ResetComputedAt() + return nil + } + return fmt.Errorf("unknown ChannelMonitorDailyRollup field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *ChannelMonitorDailyRollupMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.monitor != nil { + edges = append(edges, channelmonitordailyrollup.EdgeMonitor) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. 
+func (m *ChannelMonitorDailyRollupMutation) AddedIDs(name string) []ent.Value { + switch name { + case channelmonitordailyrollup.EdgeMonitor: + if id := m.monitor; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *ChannelMonitorDailyRollupMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *ChannelMonitorDailyRollupMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *ChannelMonitorDailyRollupMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedmonitor { + edges = append(edges, channelmonitordailyrollup.EdgeMonitor) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *ChannelMonitorDailyRollupMutation) EdgeCleared(name string) bool { + switch name { + case channelmonitordailyrollup.EdgeMonitor: + return m.clearedmonitor + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *ChannelMonitorDailyRollupMutation) ClearEdge(name string) error { + switch name { + case channelmonitordailyrollup.EdgeMonitor: + m.ClearMonitor() + return nil + } + return fmt.Errorf("unknown ChannelMonitorDailyRollup unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. 
+func (m *ChannelMonitorDailyRollupMutation) ResetEdge(name string) error { + switch name { + case channelmonitordailyrollup.EdgeMonitor: + m.ResetMonitor() + return nil + } + return fmt.Errorf("unknown ChannelMonitorDailyRollup edge %s", name) +} + // ChannelMonitorHistoryMutation represents an operation that mutates the ChannelMonitorHistory nodes in the graph. type ChannelMonitorHistoryMutation struct { config op Op typ string id *int64 + deleted_at *time.Time model *string status *channelmonitorhistory.Status latency_ms *int @@ -10051,6 +11628,55 @@ func (m *ChannelMonitorHistoryMutation) IDs(ctx context.Context) ([]int64, error } } +// SetDeletedAt sets the "deleted_at" field. +func (m *ChannelMonitorHistoryMutation) SetDeletedAt(t time.Time) { + m.deleted_at = &t +} + +// DeletedAt returns the value of the "deleted_at" field in the mutation. +func (m *ChannelMonitorHistoryMutation) DeletedAt() (r time.Time, exists bool) { + v := m.deleted_at + if v == nil { + return + } + return *v, true +} + +// OldDeletedAt returns the old "deleted_at" field's value of the ChannelMonitorHistory entity. +// If the ChannelMonitorHistory object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ChannelMonitorHistoryMutation) OldDeletedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDeletedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDeletedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDeletedAt: %w", err) + } + return oldValue.DeletedAt, nil +} + +// ClearDeletedAt clears the value of the "deleted_at" field. 
+func (m *ChannelMonitorHistoryMutation) ClearDeletedAt() { + m.deleted_at = nil + m.clearedFields[channelmonitorhistory.FieldDeletedAt] = struct{}{} +} + +// DeletedAtCleared returns if the "deleted_at" field was cleared in this mutation. +func (m *ChannelMonitorHistoryMutation) DeletedAtCleared() bool { + _, ok := m.clearedFields[channelmonitorhistory.FieldDeletedAt] + return ok +} + +// ResetDeletedAt resets all changes to the "deleted_at" field. +func (m *ChannelMonitorHistoryMutation) ResetDeletedAt() { + m.deleted_at = nil + delete(m.clearedFields, channelmonitorhistory.FieldDeletedAt) +} + // SetMonitorID sets the "monitor_id" field. func (m *ChannelMonitorHistoryMutation) SetMonitorID(i int64) { m.monitor = &i @@ -10445,7 +12071,10 @@ func (m *ChannelMonitorHistoryMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). func (m *ChannelMonitorHistoryMutation) Fields() []string { - fields := make([]string, 0, 7) + fields := make([]string, 0, 8) + if m.deleted_at != nil { + fields = append(fields, channelmonitorhistory.FieldDeletedAt) + } if m.monitor != nil { fields = append(fields, channelmonitorhistory.FieldMonitorID) } @@ -10475,6 +12104,8 @@ func (m *ChannelMonitorHistoryMutation) Fields() []string { // schema. func (m *ChannelMonitorHistoryMutation) Field(name string) (ent.Value, bool) { switch name { + case channelmonitorhistory.FieldDeletedAt: + return m.DeletedAt() case channelmonitorhistory.FieldMonitorID: return m.MonitorID() case channelmonitorhistory.FieldModel: @@ -10498,6 +12129,8 @@ func (m *ChannelMonitorHistoryMutation) Field(name string) (ent.Value, bool) { // database failed. 
func (m *ChannelMonitorHistoryMutation) OldField(ctx context.Context, name string) (ent.Value, error) { switch name { + case channelmonitorhistory.FieldDeletedAt: + return m.OldDeletedAt(ctx) case channelmonitorhistory.FieldMonitorID: return m.OldMonitorID(ctx) case channelmonitorhistory.FieldModel: @@ -10521,6 +12154,13 @@ func (m *ChannelMonitorHistoryMutation) OldField(ctx context.Context, name strin // type. func (m *ChannelMonitorHistoryMutation) SetField(name string, value ent.Value) error { switch name { + case channelmonitorhistory.FieldDeletedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDeletedAt(v) + return nil case channelmonitorhistory.FieldMonitorID: v, ok := value.(int64) if !ok { @@ -10627,6 +12267,9 @@ func (m *ChannelMonitorHistoryMutation) AddField(name string, value ent.Value) e // mutation. func (m *ChannelMonitorHistoryMutation) ClearedFields() []string { var fields []string + if m.FieldCleared(channelmonitorhistory.FieldDeletedAt) { + fields = append(fields, channelmonitorhistory.FieldDeletedAt) + } if m.FieldCleared(channelmonitorhistory.FieldLatencyMs) { fields = append(fields, channelmonitorhistory.FieldLatencyMs) } @@ -10650,6 +12293,9 @@ func (m *ChannelMonitorHistoryMutation) FieldCleared(name string) bool { // error if the field is not defined in the schema. func (m *ChannelMonitorHistoryMutation) ClearField(name string) error { switch name { + case channelmonitorhistory.FieldDeletedAt: + m.ClearDeletedAt() + return nil case channelmonitorhistory.FieldLatencyMs: m.ClearLatencyMs() return nil @@ -10667,6 +12313,9 @@ func (m *ChannelMonitorHistoryMutation) ClearField(name string) error { // It returns an error if the field is not defined in the schema. 
func (m *ChannelMonitorHistoryMutation) ResetField(name string) error { switch name { + case channelmonitorhistory.FieldDeletedAt: + m.ResetDeletedAt() + return nil case channelmonitorhistory.FieldMonitorID: m.ResetMonitorID() return nil diff --git a/backend/ent/predicate/predicate.go b/backend/ent/predicate/predicate.go index 256b5f2a..adb9a085 100644 --- a/backend/ent/predicate/predicate.go +++ b/backend/ent/predicate/predicate.go @@ -30,6 +30,9 @@ type AuthIdentityChannel func(*sql.Selector) // ChannelMonitor is the predicate function for channelmonitor builders. type ChannelMonitor func(*sql.Selector) +// ChannelMonitorDailyRollup is the predicate function for channelmonitordailyrollup builders. +type ChannelMonitorDailyRollup func(*sql.Selector) + // ChannelMonitorHistory is the predicate function for channelmonitorhistory builders. type ChannelMonitorHistory func(*sql.Selector) diff --git a/backend/ent/runtime/runtime.go b/backend/ent/runtime/runtime.go index 0183f377..25076444 100644 --- a/backend/ent/runtime/runtime.go +++ b/backend/ent/runtime/runtime.go @@ -13,6 +13,7 @@ import ( "github.com/Wei-Shaw/sub2api/ent/authidentity" "github.com/Wei-Shaw/sub2api/ent/authidentitychannel" "github.com/Wei-Shaw/sub2api/ent/channelmonitor" + "github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup" "github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory" "github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule" "github.com/Wei-Shaw/sub2api/ent/group" @@ -520,6 +521,82 @@ func init() { channelmonitorDescIntervalSeconds := channelmonitorFields[8].Descriptor() // channelmonitor.IntervalSecondsValidator is a validator for the "interval_seconds" field. It is called by the builders before save. 
channelmonitor.IntervalSecondsValidator = channelmonitorDescIntervalSeconds.Validators[0].(func(int) error) + channelmonitordailyrollupMixin := schema.ChannelMonitorDailyRollup{}.Mixin() + channelmonitordailyrollupMixinHooks0 := channelmonitordailyrollupMixin[0].Hooks() + channelmonitordailyrollup.Hooks[0] = channelmonitordailyrollupMixinHooks0[0] + channelmonitordailyrollupMixinInters0 := channelmonitordailyrollupMixin[0].Interceptors() + channelmonitordailyrollup.Interceptors[0] = channelmonitordailyrollupMixinInters0[0] + channelmonitordailyrollupFields := schema.ChannelMonitorDailyRollup{}.Fields() + _ = channelmonitordailyrollupFields + // channelmonitordailyrollupDescModel is the schema descriptor for model field. + channelmonitordailyrollupDescModel := channelmonitordailyrollupFields[1].Descriptor() + // channelmonitordailyrollup.ModelValidator is a validator for the "model" field. It is called by the builders before save. + channelmonitordailyrollup.ModelValidator = func() func(string) error { + validators := channelmonitordailyrollupDescModel.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(model string) error { + for _, fn := range fns { + if err := fn(model); err != nil { + return err + } + } + return nil + } + }() + // channelmonitordailyrollupDescTotalChecks is the schema descriptor for total_checks field. + channelmonitordailyrollupDescTotalChecks := channelmonitordailyrollupFields[3].Descriptor() + // channelmonitordailyrollup.DefaultTotalChecks holds the default value on creation for the total_checks field. + channelmonitordailyrollup.DefaultTotalChecks = channelmonitordailyrollupDescTotalChecks.Default.(int) + // channelmonitordailyrollupDescOkCount is the schema descriptor for ok_count field. 
+ channelmonitordailyrollupDescOkCount := channelmonitordailyrollupFields[4].Descriptor() + // channelmonitordailyrollup.DefaultOkCount holds the default value on creation for the ok_count field. + channelmonitordailyrollup.DefaultOkCount = channelmonitordailyrollupDescOkCount.Default.(int) + // channelmonitordailyrollupDescOperationalCount is the schema descriptor for operational_count field. + channelmonitordailyrollupDescOperationalCount := channelmonitordailyrollupFields[5].Descriptor() + // channelmonitordailyrollup.DefaultOperationalCount holds the default value on creation for the operational_count field. + channelmonitordailyrollup.DefaultOperationalCount = channelmonitordailyrollupDescOperationalCount.Default.(int) + // channelmonitordailyrollupDescDegradedCount is the schema descriptor for degraded_count field. + channelmonitordailyrollupDescDegradedCount := channelmonitordailyrollupFields[6].Descriptor() + // channelmonitordailyrollup.DefaultDegradedCount holds the default value on creation for the degraded_count field. + channelmonitordailyrollup.DefaultDegradedCount = channelmonitordailyrollupDescDegradedCount.Default.(int) + // channelmonitordailyrollupDescFailedCount is the schema descriptor for failed_count field. + channelmonitordailyrollupDescFailedCount := channelmonitordailyrollupFields[7].Descriptor() + // channelmonitordailyrollup.DefaultFailedCount holds the default value on creation for the failed_count field. + channelmonitordailyrollup.DefaultFailedCount = channelmonitordailyrollupDescFailedCount.Default.(int) + // channelmonitordailyrollupDescErrorCount is the schema descriptor for error_count field. + channelmonitordailyrollupDescErrorCount := channelmonitordailyrollupFields[8].Descriptor() + // channelmonitordailyrollup.DefaultErrorCount holds the default value on creation for the error_count field. 
+ channelmonitordailyrollup.DefaultErrorCount = channelmonitordailyrollupDescErrorCount.Default.(int) + // channelmonitordailyrollupDescSumLatencyMs is the schema descriptor for sum_latency_ms field. + channelmonitordailyrollupDescSumLatencyMs := channelmonitordailyrollupFields[9].Descriptor() + // channelmonitordailyrollup.DefaultSumLatencyMs holds the default value on creation for the sum_latency_ms field. + channelmonitordailyrollup.DefaultSumLatencyMs = channelmonitordailyrollupDescSumLatencyMs.Default.(int64) + // channelmonitordailyrollupDescCountLatency is the schema descriptor for count_latency field. + channelmonitordailyrollupDescCountLatency := channelmonitordailyrollupFields[10].Descriptor() + // channelmonitordailyrollup.DefaultCountLatency holds the default value on creation for the count_latency field. + channelmonitordailyrollup.DefaultCountLatency = channelmonitordailyrollupDescCountLatency.Default.(int) + // channelmonitordailyrollupDescSumPingLatencyMs is the schema descriptor for sum_ping_latency_ms field. + channelmonitordailyrollupDescSumPingLatencyMs := channelmonitordailyrollupFields[11].Descriptor() + // channelmonitordailyrollup.DefaultSumPingLatencyMs holds the default value on creation for the sum_ping_latency_ms field. + channelmonitordailyrollup.DefaultSumPingLatencyMs = channelmonitordailyrollupDescSumPingLatencyMs.Default.(int64) + // channelmonitordailyrollupDescCountPingLatency is the schema descriptor for count_ping_latency field. + channelmonitordailyrollupDescCountPingLatency := channelmonitordailyrollupFields[12].Descriptor() + // channelmonitordailyrollup.DefaultCountPingLatency holds the default value on creation for the count_ping_latency field. + channelmonitordailyrollup.DefaultCountPingLatency = channelmonitordailyrollupDescCountPingLatency.Default.(int) + // channelmonitordailyrollupDescComputedAt is the schema descriptor for computed_at field. 
+ channelmonitordailyrollupDescComputedAt := channelmonitordailyrollupFields[13].Descriptor() + // channelmonitordailyrollup.DefaultComputedAt holds the default value on creation for the computed_at field. + channelmonitordailyrollup.DefaultComputedAt = channelmonitordailyrollupDescComputedAt.Default.(func() time.Time) + // channelmonitordailyrollup.UpdateDefaultComputedAt holds the default value on update for the computed_at field. + channelmonitordailyrollup.UpdateDefaultComputedAt = channelmonitordailyrollupDescComputedAt.UpdateDefault.(func() time.Time) + channelmonitorhistoryMixin := schema.ChannelMonitorHistory{}.Mixin() + channelmonitorhistoryMixinHooks0 := channelmonitorhistoryMixin[0].Hooks() + channelmonitorhistory.Hooks[0] = channelmonitorhistoryMixinHooks0[0] + channelmonitorhistoryMixinInters0 := channelmonitorhistoryMixin[0].Interceptors() + channelmonitorhistory.Interceptors[0] = channelmonitorhistoryMixinInters0[0] channelmonitorhistoryFields := schema.ChannelMonitorHistory{}.Fields() _ = channelmonitorhistoryFields // channelmonitorhistoryDescModel is the schema descriptor for model field. diff --git a/backend/ent/schema/channel_monitor.go b/backend/ent/schema/channel_monitor.go index 3fa17319..f6a6578d 100644 --- a/backend/ent/schema/channel_monitor.go +++ b/backend/ent/schema/channel_monitor.go @@ -69,6 +69,8 @@ func (ChannelMonitor) Edges() []ent.Edge { return []ent.Edge{ edge.To("history", ChannelMonitorHistory.Type). Annotations(entsql.OnDelete(entsql.Cascade)), + edge.To("daily_rollups", ChannelMonitorDailyRollup.Type). 
+ Annotations(entsql.OnDelete(entsql.Cascade)), } } diff --git a/backend/ent/schema/channel_monitor_daily_rollup.go b/backend/ent/schema/channel_monitor_daily_rollup.go new file mode 100644 index 00000000..574a28d9 --- /dev/null +++ b/backend/ent/schema/channel_monitor_daily_rollup.go @@ -0,0 +1,73 @@ +package schema + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" + + "github.com/Wei-Shaw/sub2api/ent/schema/mixins" +) + +// ChannelMonitorDailyRollup 按 (monitor_id, model, bucket_date) 维度聚合的渠道监控日统计。 +// 每天的明细被收敛为一行(保留 status 分布 + 延迟和),用于 7d/15d/30d 窗口的可用率 +// 加权计算(avg_latency = sum_latency_ms / count_latency;availability = ok_count / total_checks)。 +type ChannelMonitorDailyRollup struct { + ent.Schema +} + +func (ChannelMonitorDailyRollup) Annotations() []schema.Annotation { + return []schema.Annotation{ + entsql.Annotation{Table: "channel_monitor_daily_rollups"}, + } +} + +func (ChannelMonitorDailyRollup) Mixin() []ent.Mixin { + return []ent.Mixin{ + mixins.SoftDeleteMixin{}, + } +} + +func (ChannelMonitorDailyRollup) Fields() []ent.Field { + return []ent.Field{ + field.Int64("monitor_id"), + field.String("model"). + NotEmpty(). + MaxLen(200), + field.Time("bucket_date"). 
+ SchemaType(map[string]string{dialect.Postgres: "date"}), + field.Int("total_checks").Default(0), + field.Int("ok_count").Default(0), + field.Int("operational_count").Default(0), + field.Int("degraded_count").Default(0), + field.Int("failed_count").Default(0), + field.Int("error_count").Default(0), + field.Int64("sum_latency_ms").Default(0), + field.Int("count_latency").Default(0), + field.Int64("sum_ping_latency_ms").Default(0), + field.Int("count_ping_latency").Default(0), + field.Time("computed_at").Default(time.Now).UpdateDefault(time.Now), + } +} + +func (ChannelMonitorDailyRollup) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("monitor", ChannelMonitor.Type). + Ref("daily_rollups"). + Field("monitor_id"). + Unique(). + Required(), + } +} + +func (ChannelMonitorDailyRollup) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("monitor_id", "model", "bucket_date").Unique(), + index.Fields("bucket_date"), + } +} diff --git a/backend/ent/schema/channel_monitor_history.go b/backend/ent/schema/channel_monitor_history.go index 50352016..ec54b34f 100644 --- a/backend/ent/schema/channel_monitor_history.go +++ b/backend/ent/schema/channel_monitor_history.go @@ -9,10 +9,13 @@ import ( "entgo.io/ent/schema/edge" "entgo.io/ent/schema/field" "entgo.io/ent/schema/index" + + "github.com/Wei-Shaw/sub2api/ent/schema/mixins" ) // ChannelMonitorHistory holds the schema definition for the ChannelMonitorHistory entity. 
-// 渠道监控历史:每次检测每个模型一行记录,由调度器写入,定期清理 30 天前的旧数据。 +// 渠道监控历史:每次检测每个模型一行记录。明细只保留 1 天,超过 1 天的数据被聚合到 +// channel_monitor_daily_rollups 后软删(deleted_at),由后续懒清理任务物理移除。 type ChannelMonitorHistory struct { ent.Schema } @@ -23,6 +26,12 @@ func (ChannelMonitorHistory) Annotations() []schema.Annotation { } } +func (ChannelMonitorHistory) Mixin() []ent.Mixin { + return []ent.Mixin{ + mixins.SoftDeleteMixin{}, + } +} + func (ChannelMonitorHistory) Fields() []ent.Field { return []ent.Field{ field.Int64("monitor_id"), diff --git a/backend/ent/tx.go b/backend/ent/tx.go index f937270f..0e65a940 100644 --- a/backend/ent/tx.go +++ b/backend/ent/tx.go @@ -30,6 +30,8 @@ type Tx struct { AuthIdentityChannel *AuthIdentityChannelClient // ChannelMonitor is the client for interacting with the ChannelMonitor builders. ChannelMonitor *ChannelMonitorClient + // ChannelMonitorDailyRollup is the client for interacting with the ChannelMonitorDailyRollup builders. + ChannelMonitorDailyRollup *ChannelMonitorDailyRollupClient // ChannelMonitorHistory is the client for interacting with the ChannelMonitorHistory builders. ChannelMonitorHistory *ChannelMonitorHistoryClient // ErrorPassthroughRule is the client for interacting with the ErrorPassthroughRule builders. 
@@ -217,6 +219,7 @@ func (tx *Tx) init() { tx.AuthIdentity = NewAuthIdentityClient(tx.config) tx.AuthIdentityChannel = NewAuthIdentityChannelClient(tx.config) tx.ChannelMonitor = NewChannelMonitorClient(tx.config) + tx.ChannelMonitorDailyRollup = NewChannelMonitorDailyRollupClient(tx.config) tx.ChannelMonitorHistory = NewChannelMonitorHistoryClient(tx.config) tx.ErrorPassthroughRule = NewErrorPassthroughRuleClient(tx.config) tx.Group = NewGroupClient(tx.config) diff --git a/backend/internal/repository/channel_monitor_repo.go b/backend/internal/repository/channel_monitor_repo.go index cf5e1a93..badbdbca 100644 --- a/backend/internal/repository/channel_monitor_repo.go +++ b/backend/internal/repository/channel_monitor_repo.go @@ -9,6 +9,7 @@ import ( dbent "github.com/Wei-Shaw/sub2api/ent" "github.com/Wei-Shaw/sub2api/ent/channelmonitor" + "github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup" "github.com/Wei-Shaw/sub2api/ent/channelmonitorhistory" "github.com/Wei-Shaw/sub2api/internal/service" "github.com/lib/pq" @@ -246,6 +247,7 @@ func (r *channelMonitorRepository) ListLatestPerModel(ctx context.Context, monit model, status, latency_ms, ping_latency_ms, checked_at FROM channel_monitor_histories WHERE monitor_id = $1 + AND deleted_at IS NULL ORDER BY model, checked_at DESC ` rows, err := r.db.QueryContext(ctx, q, monitorID) @@ -280,23 +282,48 @@ func assignNullInt(dst **int, n sql.NullInt64) { // ComputeAvailability 计算指定窗口内每个模型的可用率与平均延迟。 // "可用" = status IN (operational, degraded)。 +// +// 数据来源:明细表只保留 1 天;窗口前其余天数走聚合表。 +// - raw = 今天(CURRENT_DATE 起)的未软删明细,按 model 累加 +// - rollup = [CURRENT_DATE - windowDays, CURRENT_DATE) 区间的聚合行 +// +// 总窗口为 "今天 + 过去 windowDays 天",比 windowDays 字面值大 1 天,但因为聚合 +// 是按整 UTC 日切的,这是聚合化无法避免的精度损失,且偏宽不偏窄(数据更全)。 func (r *channelMonitorRepository) ComputeAvailability(ctx context.Context, monitorID int64, windowDays int) ([]*service.ChannelMonitorAvailability, error) { if windowDays <= 0 { windowDays = 7 } const q = ` - SELECT - model, - 
COUNT(*) AS total_checks, - COUNT(*) FILTER (WHERE status IN ('operational','degraded')) AS ok_checks, - AVG(latency_ms) FILTER (WHERE latency_ms IS NOT NULL) AS avg_latency_ms - FROM channel_monitor_histories - WHERE monitor_id = $1 - AND checked_at >= $2 + WITH raw AS ( + SELECT model, + COUNT(*) AS total_checks, + COUNT(*) FILTER (WHERE status IN ('operational','degraded')) AS ok_count, + COALESCE(SUM(latency_ms) FILTER (WHERE latency_ms IS NOT NULL), 0) AS sum_latency_ms, + COUNT(latency_ms) AS count_latency + FROM channel_monitor_histories + WHERE monitor_id = $1 + AND deleted_at IS NULL + AND checked_at >= CURRENT_DATE + GROUP BY model + ), + rollup AS ( + SELECT model, total_checks, ok_count, sum_latency_ms, count_latency + FROM channel_monitor_daily_rollups + WHERE monitor_id = $1 + AND deleted_at IS NULL + AND bucket_date >= (CURRENT_DATE - $2::int) + AND bucket_date < CURRENT_DATE + ) + SELECT model, + SUM(total_checks) AS total, + SUM(ok_count) AS ok, + CASE WHEN SUM(count_latency) > 0 + THEN SUM(sum_latency_ms)::float8 / SUM(count_latency) + ELSE NULL END AS avg_latency_ms + FROM (SELECT * FROM raw UNION ALL SELECT * FROM rollup) combined GROUP BY model ` - from := time.Now().AddDate(0, 0, -windowDays) - rows, err := r.db.QueryContext(ctx, q, monitorID, from) + rows, err := r.db.QueryContext(ctx, q, monitorID, windowDays) if err != nil { return nil, fmt.Errorf("query availability: %w", err) } @@ -349,6 +376,7 @@ func (r *channelMonitorRepository) ListLatestForMonitorIDs(ctx context.Context, monitor_id, model, status, latency_ms, ping_latency_ms, checked_at FROM channel_monitor_histories WHERE monitor_id = ANY($1) + AND deleted_at IS NULL ORDER BY monitor_id, model, checked_at DESC ` rows, err := r.db.QueryContext(ctx, q, pq.Array(ids)) @@ -409,6 +437,7 @@ func (r *channelMonitorRepository) ListRecentHistoryForMonitors( FROM channel_monitor_histories h JOIN targets t ON t.monitor_id = h.monitor_id AND t.model = h.model + WHERE h.deleted_at IS NULL ) 
SELECT monitor_id, status, latency_ms, ping_latency_ms, checked_at FROM ranked @@ -476,6 +505,7 @@ func clampTimelineLimit(n int) int { } // ComputeAvailabilityForMonitors 一次性计算多个监控在某个窗口内的每模型可用率与平均延迟。 +// 与单 monitor 版本同构:明细只覆盖今天,更早走聚合表 UNION 合并。 func (r *channelMonitorRepository) ComputeAvailabilityForMonitors(ctx context.Context, ids []int64, windowDays int) (map[int64][]*service.ChannelMonitorAvailability, error) { out := make(map[int64][]*service.ChannelMonitorAvailability, len(ids)) if len(ids) == 0 { @@ -485,19 +515,38 @@ func (r *channelMonitorRepository) ComputeAvailabilityForMonitors(ctx context.Co windowDays = 7 } const q = ` - SELECT - monitor_id, - model, - COUNT(*) AS total_checks, - COUNT(*) FILTER (WHERE status IN ('operational','degraded')) AS ok_checks, - AVG(latency_ms) FILTER (WHERE latency_ms IS NOT NULL) AS avg_latency_ms - FROM channel_monitor_histories - WHERE monitor_id = ANY($1) - AND checked_at >= $2 + WITH raw AS ( + SELECT monitor_id, + model, + COUNT(*) AS total_checks, + COUNT(*) FILTER (WHERE status IN ('operational','degraded')) AS ok_count, + COALESCE(SUM(latency_ms) FILTER (WHERE latency_ms IS NOT NULL), 0) AS sum_latency_ms, + COUNT(latency_ms) AS count_latency + FROM channel_monitor_histories + WHERE monitor_id = ANY($1) + AND deleted_at IS NULL + AND checked_at >= CURRENT_DATE + GROUP BY monitor_id, model + ), + rollup AS ( + SELECT monitor_id, model, total_checks, ok_count, sum_latency_ms, count_latency + FROM channel_monitor_daily_rollups + WHERE monitor_id = ANY($1) + AND deleted_at IS NULL + AND bucket_date >= (CURRENT_DATE - $2::int) + AND bucket_date < CURRENT_DATE + ) + SELECT monitor_id, + model, + SUM(total_checks) AS total, + SUM(ok_count) AS ok, + CASE WHEN SUM(count_latency) > 0 + THEN SUM(sum_latency_ms)::float8 / SUM(count_latency) + ELSE NULL END AS avg_latency_ms + FROM (SELECT * FROM raw UNION ALL SELECT * FROM rollup) combined GROUP BY monitor_id, model ` - from := time.Now().AddDate(0, 0, -windowDays) - rows, 
err := r.db.QueryContext(ctx, q, pq.Array(ids), from) + rows, err := r.db.QueryContext(ctx, q, pq.Array(ids), windowDays) if err != nil { return nil, fmt.Errorf("query availability batch: %w", err) } @@ -521,6 +570,116 @@ func (r *channelMonitorRepository) ComputeAvailabilityForMonitors(ctx context.Co return out, nil } +// ---------- 聚合维护 ---------- + +// UpsertDailyRollupsFor 把 targetDate 当天([targetDate, targetDate+1d))未软删的明细 +// 按 (monitor_id, model, bucket_date) 聚合写入 channel_monitor_daily_rollups。 +// - 用 ON CONFLICT (monitor_id, model, bucket_date) DO UPDATE 实现幂等回填, +// 重复执行只会用最新统计覆盖; +// - 同时把 deleted_at 重置为 NULL,避免历史误删后聚合行被持续过滤掉; +// - $1::date 让 PG 自动把入参 truncate 到 UTC 日期,调用方不需要预处理 targetDate。 +func (r *channelMonitorRepository) UpsertDailyRollupsFor(ctx context.Context, targetDate time.Time) (int64, error) { + const q = ` + INSERT INTO channel_monitor_daily_rollups ( + monitor_id, model, bucket_date, + total_checks, ok_count, + operational_count, degraded_count, failed_count, error_count, + sum_latency_ms, count_latency, + sum_ping_latency_ms, count_ping_latency, + computed_at + ) + SELECT + monitor_id, + model, + $1::date AS bucket_date, + COUNT(*) AS total_checks, + COUNT(*) FILTER (WHERE status IN ('operational','degraded')) AS ok_count, + COUNT(*) FILTER (WHERE status = 'operational') AS operational_count, + COUNT(*) FILTER (WHERE status = 'degraded') AS degraded_count, + COUNT(*) FILTER (WHERE status = 'failed') AS failed_count, + COUNT(*) FILTER (WHERE status = 'error') AS error_count, + COALESCE(SUM(latency_ms) FILTER (WHERE latency_ms IS NOT NULL), 0) AS sum_latency_ms, + COUNT(latency_ms) AS count_latency, + COALESCE(SUM(ping_latency_ms) FILTER (WHERE ping_latency_ms IS NOT NULL), 0) AS sum_ping_latency_ms, + COUNT(ping_latency_ms) AS count_ping_latency, + NOW() + FROM channel_monitor_histories + WHERE deleted_at IS NULL + AND checked_at >= $1::date + AND checked_at < ($1::date + INTERVAL '1 day') + GROUP BY monitor_id, model + ON CONFLICT 
(monitor_id, model, bucket_date) DO UPDATE SET + total_checks = EXCLUDED.total_checks, + ok_count = EXCLUDED.ok_count, + operational_count = EXCLUDED.operational_count, + degraded_count = EXCLUDED.degraded_count, + failed_count = EXCLUDED.failed_count, + error_count = EXCLUDED.error_count, + sum_latency_ms = EXCLUDED.sum_latency_ms, + count_latency = EXCLUDED.count_latency, + sum_ping_latency_ms = EXCLUDED.sum_ping_latency_ms, + count_ping_latency = EXCLUDED.count_ping_latency, + computed_at = NOW(), + deleted_at = NULL + ` + res, err := r.db.ExecContext(ctx, q, targetDate) + if err != nil { + return 0, fmt.Errorf("upsert daily rollups for %s: %w", targetDate.Format("2006-01-02"), err) + } + n, err := res.RowsAffected() + if err != nil { + return 0, fmt.Errorf("rows affected (upsert rollups): %w", err) + } + return n, nil +} + +// DeleteRollupsBefore 软删 bucket_date < beforeDate 的聚合行。 +// 走 ent client,利用 SoftDeleteMixin 把 DELETE 自动改写为 UPDATE deleted_at = NOW()。 +func (r *channelMonitorRepository) DeleteRollupsBefore(ctx context.Context, beforeDate time.Time) (int64, error) { + client := clientFromContext(ctx, r.client) + n, err := client.ChannelMonitorDailyRollup.Delete(). + Where(channelmonitordailyrollup.BucketDateLT(beforeDate)). 
+ Exec(ctx) + if err != nil { + return 0, fmt.Errorf("delete rollups before: %w", err) + } + return int64(n), nil +} + +// LoadAggregationWatermark 读 watermark 表(id=1)。 +// watermark 表不是 ent schema(只有一行),直接走原生 SQL。 +// - 行不存在或 last_aggregated_date IS NULL:返回 (nil, nil),由调用方决定首次回填策略 +func (r *channelMonitorRepository) LoadAggregationWatermark(ctx context.Context) (*time.Time, error) { + const q = `SELECT last_aggregated_date FROM channel_monitor_aggregation_watermark WHERE id = 1` + var t sql.NullTime + if err := r.db.QueryRowContext(ctx, q).Scan(&t); err != nil { + if err == sql.ErrNoRows { + return nil, nil + } + return nil, fmt.Errorf("load aggregation watermark: %w", err) + } + if !t.Valid { + return nil, nil + } + return &t.Time, nil +} + +// UpdateAggregationWatermark 更新 watermark(UPSERT 到 id=1)。 +// $1::date 让 PG 按会话时区把入参 truncate 到日期(调用方已传 UTC 零点;NOTE(review): 仅当连接 TimeZone 为 UTC 时才等价于 UTC 日期,需确认 DSN 配置),与 last_aggregated_date 列的 DATE 类型一致。 +func (r *channelMonitorRepository) UpdateAggregationWatermark(ctx context.Context, date time.Time) error { + const q = ` + INSERT INTO channel_monitor_aggregation_watermark (id, last_aggregated_date, updated_at) + VALUES (1, $1::date, NOW()) + ON CONFLICT (id) DO UPDATE SET + last_aggregated_date = EXCLUDED.last_aggregated_date, + updated_at = NOW() + ` + if _, err := r.db.ExecContext(ctx, q, date); err != nil { + return fmt.Errorf("update aggregation watermark: %w", err) + } + return nil +} + // ---------- helpers ---------- func entToServiceMonitor(row *dbent.ChannelMonitor) *service.ChannelMonitor { diff --git a/backend/internal/service/channel_monitor_const.go b/backend/internal/service/channel_monitor_const.go index 7255e4be..b61f3bdd 100644 --- a/backend/internal/service/channel_monitor_const.go +++ b/backend/internal/service/channel_monitor_const.go @@ -15,8 +15,16 @@ const ( monitorPingTimeout = 8 * time.Second // monitorDegradedThreshold 主请求成功但耗时超过该阈值视为 degraded。 monitorDegradedThreshold = 6 * time.Second - // monitorHistoryRetentionDays 历史保留天数(每天清理一次)。 - monitorHistoryRetentionDays 
= 30 + // monitorHistoryRetentionDays 明细历史保留天数。 + // 明细只保留 1 天,超出由 SoftDeleteMixin 软删; + // 维护任务每天凌晨跑(由 OpsCleanupService 统一调度)。 + monitorHistoryRetentionDays = 1 + // monitorRollupRetentionDays 日聚合保留天数。 + // 日聚合行由 RunDailyMaintenance 在超过该窗口后软删。 + monitorRollupRetentionDays = 30 + // monitorMaintenanceMaxDaysPerRun 单次维护任务最多聚合的天数。 + // 用于限制首次上线回填(30 天)+ 少量余量,避免长事务。 + monitorMaintenanceMaxDaysPerRun = 35 // monitorWorkerConcurrency 调度器并发执行的监控数(pond 池容量)。 monitorWorkerConcurrency = 5 // monitorTickerInterval 调度器扫描"到期监控"的间隔。 @@ -55,11 +63,6 @@ const ( monitorAvailability15Days = 15 monitorAvailability30Days = 30 - // monitorCleanupCheckInterval 历史清理调度器的检查频率(每小时检查"是否到 03:00")。 - monitorCleanupCheckInterval = time.Hour - // monitorCleanupHour 凌晨 3 点执行历史清理。 - monitorCleanupHour = 3 - // MonitorHistoryDefaultLimit 历史查询默认返回条数(handler 层共享)。 MonitorHistoryDefaultLimit = 100 // MonitorHistoryMaxLimit 历史查询最大返回条数(handler 层共享)。 @@ -82,10 +85,6 @@ const ( monitorListDueTimeout = 10 * time.Second // monitorRunOneBuffer runOne 的总超时缓冲(除请求超时与 ping 超时外的额外裕量)。 monitorRunOneBuffer = 10 * time.Second - // monitorCleanupTimeout 历史清理任务的总超时。 - monitorCleanupTimeout = 30 * time.Second - // monitorCleanupDayLayout 历史清理用于"今日是否已跑过"判定的日期格式。 - monitorCleanupDayLayout = "2006-01-02" // monitorIdleConnTimeout HTTP transport 空闲连接关闭超时。 monitorIdleConnTimeout = 30 * time.Second diff --git a/backend/internal/service/channel_monitor_runner.go b/backend/internal/service/channel_monitor_runner.go index 4655e6df..21dca8ab 100644 --- a/backend/internal/service/channel_monitor_runner.go +++ b/backend/internal/service/channel_monitor_runner.go @@ -14,10 +14,10 @@ import ( // 职责: // - 每 monitorTickerInterval 扫描一次"到期需要检测"的监控 // - 通过 pond 池(容量 monitorWorkerConcurrency)异步执行检测 -// - 每小时检查一次时钟,到 monitorCleanupHour 点时执行历史清理 // - Stop 时优雅关闭:池 drain + ticker.Stop + wg.Wait // -// 不引入 cron 库;清理调度通过"每小时检查时间"实现,足够 MVP。 +// 历史清理与日聚合维护不再由 runner 负责,由 OpsCleanupService 的统一 cron +// 在凌晨触发 
ChannelMonitorService.RunDailyMaintenance(复用 leader lock + heartbeat)。 // // 定时任务维护:删除/创建/编辑 monitor 无需显式 reload,每个 tick 都会重新查 DB // (ListEnabled + listDueForCheck),新 monitor 的 LastCheckedAt 为 nil 天然立即到期, @@ -35,10 +35,6 @@ type ChannelMonitorRunner struct { // 防止单次检测耗时 > interval 时同一 monitor 被并发执行。 inFlight map[int64]struct{} inFlightMu sync.Mutex - - // 清理状态:lastCleanupDay 记录上次清理的"年-月-日",避免同一天重复跑。 - lastCleanupDay string - cleanupMu sync.Mutex } // NewChannelMonitorRunner 构造调度器。Start 在 wire 中调用。 @@ -52,7 +48,7 @@ func NewChannelMonitorRunner(svc *ChannelMonitorService, settingService *Setting } } -// Start 启动 ticker + worker pool + cleanup loop。 +// Start 启动 ticker + worker pool。 // 调用方需保证只调一次(wire ProvideChannelMonitorRunner 内只调一次)。 func (r *ChannelMonitorRunner) Start() { if r == nil || r.svc == nil { @@ -61,12 +57,11 @@ func (r *ChannelMonitorRunner) Start() { // 容量 5 的 pond 池:超出时调用方等待,避免调度堆积无限增长。 r.pool = pond.NewPool(monitorWorkerConcurrency) - r.wg.Add(2) + r.wg.Add(1) go r.dueCheckLoop() - go r.cleanupLoop() } -// Stop 优雅停止:close stopCh -> 等待两个 loop 退出 -> 池 drain。 +// Stop 优雅停止:close stopCh -> 等待 loop 退出 -> 池 drain。 func (r *ChannelMonitorRunner) Stop() { if r == nil { return @@ -176,45 +171,3 @@ func (r *ChannelMonitorRunner) runOne(id int64, name string) { "monitor_id", id, "name", name, "error", err) } } - -// cleanupLoop 每小时检查当前时间,到 monitorCleanupHour 点(且当天还没清理过)则跑一次清理。 -// 启动时立即检查一次,避免长时间运行才跑首次清理。 -func (r *ChannelMonitorRunner) cleanupLoop() { - defer r.wg.Done() - - ticker := time.NewTicker(monitorCleanupCheckInterval) - defer ticker.Stop() - - r.maybeRunCleanup() - for { - select { - case <-r.stopCh: - return - case <-ticker.C: - r.maybeRunCleanup() - } - } -} - -// maybeRunCleanup 如果当前小时是 monitorCleanupHour 且当天未跑过,则执行清理。 -func (r *ChannelMonitorRunner) maybeRunCleanup() { - now := time.Now() - if now.Hour() != monitorCleanupHour { - return - } - day := now.Format(monitorCleanupDayLayout) - - r.cleanupMu.Lock() - if r.lastCleanupDay == day { - 
r.cleanupMu.Unlock() - return - } - r.lastCleanupDay = day - r.cleanupMu.Unlock() - - ctx, cancel := context.WithTimeout(context.Background(), monitorCleanupTimeout) - defer cancel() - if err := r.svc.cleanupOldHistory(ctx); err != nil { - slog.Warn("channel_monitor: cleanup history failed", "error", err) - } -} diff --git a/backend/internal/service/channel_monitor_service.go b/backend/internal/service/channel_monitor_service.go index 957ace15..144c66a0 100644 --- a/backend/internal/service/channel_monitor_service.go +++ b/backend/internal/service/channel_monitor_service.go @@ -41,6 +41,20 @@ type ChannelMonitorRepository interface { // ListRecentHistoryForMonitors 批量取多个 monitor 各自主模型(primaryModels[monitorID])最近 perMonitorLimit 条历史。 // 返回的 entry 已按 checked_at DESC 排序(最新在前),不含 message 字段。 ListRecentHistoryForMonitors(ctx context.Context, ids []int64, primaryModels map[int64]string, perMonitorLimit int) (map[int64][]*ChannelMonitorHistoryEntry, error) + + // ---------- 聚合维护(OpsCleanupService 调用) ---------- + + // UpsertDailyRollupsFor 把 targetDate 当天的明细按 (monitor_id, model, bucket_date) + // 聚合到 channel_monitor_daily_rollups。targetDate 会被截断到日期; + // 用 ON CONFLICT DO UPDATE 实现幂等回填,返回 upsert 影响的行数。 + UpsertDailyRollupsFor(ctx context.Context, targetDate time.Time) (int64, error) + // DeleteRollupsBefore 软删 bucket_date < beforeDate 的聚合行,返回删除行数。 + DeleteRollupsBefore(ctx context.Context, beforeDate time.Time) (int64, error) + // LoadAggregationWatermark 读 watermark(id=1)。 + // 返回 nil 表示从未聚合过;watermark 表本身预期已存在单行(migration 126 写入)。 + LoadAggregationWatermark(ctx context.Context) (*time.Time, error) + // UpdateAggregationWatermark 写 watermark(UPSERT 到 id=1)。 + UpdateAggregationWatermark(ctx context.Context, date time.Time) error } // ChannelMonitorService 渠道监控管理服务。 @@ -300,9 +314,10 @@ func (s *ChannelMonitorService) listDueForCheck(ctx context.Context) ([]*Channel return due, nil } -// cleanupOldHistory 删除 monitorHistoryRetentionDays 天之前的历史记录。 +// cleanupOldHistory 删除 
monitorHistoryRetentionDays 天之前的明细历史记录。 +// 由 RunDailyMaintenance 调用;SoftDeleteMixin 自动把 DELETE 改为 UPDATE deleted_at。 func (s *ChannelMonitorService) cleanupOldHistory(ctx context.Context) error { - before := time.Now().AddDate(0, 0, -monitorHistoryRetentionDays) + before := time.Now().UTC().AddDate(0, 0, -monitorHistoryRetentionDays) deleted, err := s.repo.DeleteHistoryBefore(ctx, before) if err != nil { return fmt.Errorf("delete history before %s: %w", before.Format(time.RFC3339), err) @@ -314,6 +329,94 @@ func (s *ChannelMonitorService) cleanupOldHistory(ctx context.Context) error { return nil } +// RunDailyMaintenance 每日维护任务:聚合昨天之前未聚合的明细,软删过期明细和聚合。 +// 由 OpsCleanupService 的 cron 调度触发(共享 schedule 和 leader lock)。 +// +// 幂等性: +// - watermark 保证已聚合的日期不会重复处理; +// - UpsertDailyRollupsFor 内部使用 ON CONFLICT DO UPDATE,同一日重复跑结果一致。 +// +// 每一步失败都只记 slog.Warn,整体函数始终返回 nil 让后续步骤能继续跑 +// (与 OpsCleanupService.runCleanupOnce 风格一致)。 +func (s *ChannelMonitorService) RunDailyMaintenance(ctx context.Context) error { + now := time.Now().UTC() + today := now.Truncate(24 * time.Hour) + + if err := s.runDailyAggregation(ctx, today); err != nil { + slog.Warn("channel_monitor: maintenance step failed", + "step", "aggregate", "error", err) + } + if err := s.cleanupOldHistory(ctx); err != nil { + slog.Warn("channel_monitor: maintenance step failed", + "step", "prune_history", "error", err) + } + if err := s.cleanupOldRollups(ctx, today); err != nil { + slog.Warn("channel_monitor: maintenance step failed", + "step", "prune_rollups", "error", err) + } + return nil +} + +// runDailyAggregation 从 watermark+1 聚合到昨天(UTC)。 +// 首次跑(watermark nil):从 today-monitorRollupRetentionDays 开始回填。 +// 每次最多聚合 monitorMaintenanceMaxDaysPerRun 天,避免长事务。 +func (s *ChannelMonitorService) runDailyAggregation(ctx context.Context, today time.Time) error { + watermark, err := s.repo.LoadAggregationWatermark(ctx) + if err != nil { + return fmt.Errorf("load watermark: %w", err) + } + + start := 
s.resolveAggregationStart(watermark, today) + if !start.Before(today) { + return nil // 没有需要聚合的日期 + } + + iterations := 0 + for d := start; d.Before(today); d = d.Add(24 * time.Hour) { + if iterations >= monitorMaintenanceMaxDaysPerRun { + slog.Info("channel_monitor: maintenance aggregation capped", + "max_days", monitorMaintenanceMaxDaysPerRun, + "next_resume", d.Format("2006-01-02")) + break + } + affected, upErr := s.repo.UpsertDailyRollupsFor(ctx, d) + if upErr != nil { + return fmt.Errorf("upsert rollups for %s: %w", d.Format("2006-01-02"), upErr) + } + if err := s.repo.UpdateAggregationWatermark(ctx, d); err != nil { + return fmt.Errorf("update watermark to %s: %w", d.Format("2006-01-02"), err) + } + slog.Info("channel_monitor: rollups upserted", + "date", d.Format("2006-01-02"), "affected_rows", affected) + iterations++ + } + return nil +} + +// resolveAggregationStart 计算本次聚合起点: +// - watermark == nil:today - monitorRollupRetentionDays(首次回填最多 30 天) +// - watermark != nil:*watermark + 1 day +func (s *ChannelMonitorService) resolveAggregationStart(watermark *time.Time, today time.Time) time.Time { + if watermark == nil { + return today.AddDate(0, 0, -monitorRollupRetentionDays) + } + return watermark.UTC().Truncate(24 * time.Hour).Add(24 * time.Hour) +} + +// cleanupOldRollups 软删 bucket_date < today - monitorRollupRetentionDays 的日聚合行。 +func (s *ChannelMonitorService) cleanupOldRollups(ctx context.Context, today time.Time) error { + cutoff := today.AddDate(0, 0, -monitorRollupRetentionDays) + deleted, err := s.repo.DeleteRollupsBefore(ctx, cutoff) + if err != nil { + return fmt.Errorf("delete rollups before %s: %w", cutoff.Format("2006-01-02"), err) + } + if deleted > 0 { + slog.Info("channel_monitor: rollups cleanup", + "deleted_rows", deleted, "before", cutoff.Format("2006-01-02")) + } + return nil +} + // ---------- helpers ---------- // decryptInPlace 把 ChannelMonitor.APIKey 从密文解密为明文。 diff --git a/backend/internal/service/ops_cleanup_service.go 
b/backend/internal/service/ops_cleanup_service.go index 1cae6fe5..08a10a02 100644 --- a/backend/internal/service/ops_cleanup_service.go +++ b/backend/internal/service/ops_cleanup_service.go @@ -36,11 +36,15 @@ return 0 // - Scheduling: 5-field cron spec (minute hour dom month dow). // - Multi-instance: best-effort Redis leader lock so only one node runs cleanup. // - Safety: deletes in batches to avoid long transactions. +// +// 附带:在 runCleanupOnce 末尾调用 ChannelMonitorService.RunDailyMaintenance, +// 统一共享 cron schedule + leader lock + heartbeat,避免再引一套调度。 type OpsCleanupService struct { - opsRepo OpsRepository - db *sql.DB - redisClient *redis.Client - cfg *config.Config + opsRepo OpsRepository + db *sql.DB + redisClient *redis.Client + cfg *config.Config + channelMonitorSvc *ChannelMonitorService instanceID string @@ -57,13 +61,15 @@ func NewOpsCleanupService( db *sql.DB, redisClient *redis.Client, cfg *config.Config, + channelMonitorSvc *ChannelMonitorService, ) *OpsCleanupService { return &OpsCleanupService{ - opsRepo: opsRepo, - db: db, - redisClient: redisClient, - cfg: cfg, - instanceID: uuid.NewString(), + opsRepo: opsRepo, + db: db, + redisClient: redisClient, + cfg: cfg, + channelMonitorSvc: channelMonitorSvc, + instanceID: uuid.NewString(), } } @@ -248,6 +254,15 @@ func (s *OpsCleanupService) runCleanupOnce(ctx context.Context) (opsCleanupDelet out.dailyPreagg = n } + // Channel monitor 每日维护(聚合昨日明细 + 软删过期明细/聚合)。 + // 失败只记日志,不影响 ops 清理的成功状态(与 ops 各步骤风格一致); + // 维护本身已经把每步错误打到 slog,heartbeat result 不再分项记录。 + if s.channelMonitorSvc != nil { + if err := s.channelMonitorSvc.RunDailyMaintenance(ctx); err != nil { + logger.LegacyPrintf("service.ops_cleanup", "[OpsCleanup] channel monitor maintenance failed: %v", err) + } + } + return out, nil } diff --git a/backend/internal/service/wire.go b/backend/internal/service/wire.go index 5d8d88d2..1482d650 100644 --- a/backend/internal/service/wire.go +++ b/backend/internal/service/wire.go @@ -262,13 +262,16 @@ func 
ProvideOpsAlertEvaluatorService( } // ProvideOpsCleanupService creates and starts OpsCleanupService (cron scheduled). +// channelMonitorSvc 让维护任务(聚合 + 历史/聚合软删)跟随 ops 清理 cron 一起跑, +// 共享 leader lock + heartbeat。 func ProvideOpsCleanupService( opsRepo OpsRepository, db *sql.DB, redisClient *redis.Client, cfg *config.Config, + channelMonitorSvc *ChannelMonitorService, ) *OpsCleanupService { - svc := NewOpsCleanupService(opsRepo, db, redisClient, cfg) + svc := NewOpsCleanupService(opsRepo, db, redisClient, cfg, channelMonitorSvc) svc.Start() return svc } diff --git a/backend/migrations/126_add_channel_monitor_aggregation.sql b/backend/migrations/126_add_channel_monitor_aggregation.sql new file mode 100644 index 00000000..e643763c --- /dev/null +++ b/backend/migrations/126_add_channel_monitor_aggregation.sql @@ -0,0 +1,60 @@ +-- Migration: 126_add_channel_monitor_aggregation +-- 渠道监控日聚合:把 channel_monitor_histories 的明细按天聚合,明细只保留 1 天, +-- 聚合保留 30 天。明细和聚合表都用软删除(deleted_at),由 ops cleanup 任务每天 +-- 凌晨随运维监控清理一起跑(共享 cron)。 +-- +-- 设计要点: +-- - channel_monitor_histories 加 deleted_at 软删除字段(SoftDeleteMixin 全局 +-- Hook 会把 DELETE 自动改写成 UPDATE deleted_at = NOW())。 +-- - channel_monitor_daily_rollups 按 (monitor_id, model, bucket_date) 唯一, +-- 用 ON CONFLICT DO UPDATE 实现幂等回填,状态分布和延迟分子分母都保留, +-- 方便后续按窗口任意求加权可用率和均值。 +-- - watermark 表只有一行(id=1),记录最近一次聚合到达的日期,避免重启后重复 +-- 扫全表。 +-- - rollup 上 (bucket_date) 索引服务清理任务的 DELETE WHERE bucket_date < cutoff。 + +-- 1) 给历史明细表加软删除字段 +ALTER TABLE channel_monitor_histories + ADD COLUMN IF NOT EXISTS deleted_at TIMESTAMPTZ; + +CREATE INDEX IF NOT EXISTS idx_channel_monitor_histories_deleted_at + ON channel_monitor_histories (deleted_at); + +-- 2) 创建日聚合表 +CREATE TABLE IF NOT EXISTS channel_monitor_daily_rollups ( + id BIGSERIAL PRIMARY KEY, + monitor_id BIGINT NOT NULL REFERENCES channel_monitors(id) ON DELETE CASCADE, + model VARCHAR(200) NOT NULL, + bucket_date DATE NOT NULL, + total_checks INT NOT NULL DEFAULT 0, + ok_count INT NOT NULL DEFAULT 0, + 
operational_count INT NOT NULL DEFAULT 0, + degraded_count INT NOT NULL DEFAULT 0, + failed_count INT NOT NULL DEFAULT 0, + error_count INT NOT NULL DEFAULT 0, + sum_latency_ms BIGINT NOT NULL DEFAULT 0, + count_latency INT NOT NULL DEFAULT 0, + sum_ping_latency_ms BIGINT NOT NULL DEFAULT 0, + count_ping_latency INT NOT NULL DEFAULT 0, + computed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + deleted_at TIMESTAMPTZ +); + +CREATE UNIQUE INDEX IF NOT EXISTS idx_channel_monitor_daily_rollups_unique + ON channel_monitor_daily_rollups (monitor_id, model, bucket_date); +CREATE INDEX IF NOT EXISTS idx_channel_monitor_daily_rollups_bucket + ON channel_monitor_daily_rollups (bucket_date); +CREATE INDEX IF NOT EXISTS idx_channel_monitor_daily_rollups_deleted_at + ON channel_monitor_daily_rollups (deleted_at); + +-- 3) 创建 watermark 表(单行:id=1) +CREATE TABLE IF NOT EXISTS channel_monitor_aggregation_watermark ( + id INT PRIMARY KEY DEFAULT 1, + last_aggregated_date DATE, + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + CONSTRAINT channel_monitor_aggregation_watermark_singleton CHECK (id = 1) +); + +INSERT INTO channel_monitor_aggregation_watermark (id, last_aggregated_date, updated_at) +VALUES (1, NULL, NOW()) +ON CONFLICT (id) DO NOTHING; diff --git a/frontend/src/components/admin/monitor/MonitorFormDialog.vue b/frontend/src/components/admin/monitor/MonitorFormDialog.vue index 56a06a9f..836ec079 100644 --- a/frontend/src/components/admin/monitor/MonitorFormDialog.vue +++ b/frontend/src/components/admin/monitor/MonitorFormDialog.vue @@ -113,6 +113,7 @@ :loading="myKeysLoading" :keys="myActiveKeys" :provider="form.provider" + :user-group-rates="userGroupRates" @close="showKeyPicker = false" @pick="pickMyKey" /> @@ -125,6 +126,7 @@ import { useAppStore } from '@/stores/app' import { extractApiErrorMessage } from '@/utils/apiError' import { adminAPI } from '@/api/admin' import { keysAPI } from '@/api/keys' +import { userGroupsAPI } from '@/api/groups' import type { ChannelMonitor, 
CreateParams, @@ -175,6 +177,7 @@ const submitting = ref(false) const showKeyPicker = ref(false) const myKeysLoading = ref(false) const myActiveKeys = ref([]) +const userGroupRates = ref>({}) interface MonitorForm { name: string @@ -263,7 +266,10 @@ async function openMyKeyPicker() { if (myActiveKeys.value.length > 0) return myKeysLoading.value = true try { - const res = await keysAPI.list(1, 100, { status: 'active' }) + const [res, rates] = await Promise.all([ + keysAPI.list(1, 100, { status: 'active' }), + userGroupsAPI.getUserGroupRates(), + ]) const items = res.items || [] const now = Date.now() myActiveKeys.value = items.filter(k => { @@ -271,6 +277,7 @@ async function openMyKeyPicker() { if (!k.expires_at) return true return new Date(k.expires_at).getTime() > now }) + userGroupRates.value = rates } catch (err: unknown) { appStore.showError(extractApiErrorMessage(err, t('admin.channelMonitor.form.noActiveKey'))) } finally { diff --git a/frontend/src/components/admin/monitor/MonitorKeyPickerDialog.vue b/frontend/src/components/admin/monitor/MonitorKeyPickerDialog.vue index 4fd71cb2..8df8d586 100644 --- a/frontend/src/components/admin/monitor/MonitorKeyPickerDialog.vue +++ b/frontend/src/components/admin/monitor/MonitorKeyPickerDialog.vue @@ -47,9 +47,14 @@ {{ k.name }} {{ maskApiKey(k.key) }} - - {{ k.group.name }} - + @@ -73,14 +78,18 @@ import { useI18n } from 'vue-i18n' import type { ApiKey } from '@/types' import type { Provider } from '@/api/admin/channelMonitor' import BaseDialog from '@/components/common/BaseDialog.vue' +import GroupBadge from '@/components/common/GroupBadge.vue' import { maskApiKey } from '@/utils/maskApiKey' -const props = defineProps<{ +const props = withDefaults(defineProps<{ show: boolean loading: boolean keys: ApiKey[] provider: Provider -}>() + userGroupRates?: Record +}>(), { + userGroupRates: () => ({}), +}) defineEmits<{ (e: 'close'): void