明细只保留 1 天,超过 1 天聚合到新表 channel_monitor_daily_rollups(按 monitor_id/model/bucket_date 维度),聚合保留 30 天。两张表都用 SoftDeleteMixin 软删除(DELETE 自动改为 UPDATE deleted_at = NOW())。 聚合 + 清理任务由 OpsCleanupService 的 cron 统一调度,与运维监控的清理共享 schedule(默认 0 2 * * *)和 leader lock。ChannelMonitorRunner 的 cleanupLoop 被移除,只保留 dueCheckLoop。 读取路径 ComputeAvailability* 改为 UNION 明细(今天 deleted_at IS NULL)+ 聚合(过去 windowDays 天 deleted_at IS NULL),SUM(ok)/SUM(total) 自然加权 计算可用率,AVG latency 用 SUM(sum_latency_ms)/SUM(count_latency)。 watermark 表 channel_monitor_aggregation_watermark 单行(id=1),记录 last_aggregated_date,重启后从该日期 +1 继续聚合,首次为 nil 则从 today - 30d 开始回填,单次最多 35 天上限避免长事务。 raw SQL 的 ListLatestPerModel / ListLatestForMonitorIDs / ListRecentHistoryForMonitors 都补上 deleted_at IS NULL 过滤(SoftDeleteMixin interceptor 只对 ent query 生效)。 bump version to 0.1.114.28 GroupBadge 在 MonitorKeyPickerDialog 中复用平台主题色 + 倍率/专属倍率 (顺手优化)。
644 lines · 20 KiB · Go
// Code generated by ent, DO NOT EDIT.
|
|
|
|
package ent
|
|
|
|
import (
|
|
"context"
|
|
"fmt"
|
|
"math"
|
|
|
|
"entgo.io/ent"
|
|
"entgo.io/ent/dialect"
|
|
"entgo.io/ent/dialect/sql"
|
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
|
"entgo.io/ent/schema/field"
|
|
"github.com/Wei-Shaw/sub2api/ent/channelmonitor"
|
|
"github.com/Wei-Shaw/sub2api/ent/channelmonitordailyrollup"
|
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
|
)
|
|
|
|
// ChannelMonitorDailyRollupQuery is the builder for querying ChannelMonitorDailyRollup entities.
type ChannelMonitorDailyRollupQuery struct {
	config                                                  // embedded client config (driver, dialect, hooks, interceptors).
	ctx         *QueryContext                               // per-query options: Limit, Offset, Unique, selected Fields.
	order       []channelmonitordailyrollup.OrderOption     // ORDER BY terms, applied in insertion order.
	inters      []Interceptor                               // query interceptors (e.g. the SoftDeleteMixin filter) run in prepareQuery.
	predicates  []predicate.ChannelMonitorDailyRollup       // WHERE predicates, ANDed together.
	withMonitor *ChannelMonitorQuery                        // non-nil when the "monitor" edge should be eager-loaded.
	modifiers   []func(*sql.Selector)                       // raw selector modifiers (e.g. FOR UPDATE / FOR SHARE).
	// intermediate query (i.e. traversal path).
	sql  *sql.Selector                                      // selector carried over from a graph traversal, if any.
	path func(context.Context) (*sql.Selector, error)       // lazy resolver for the traversal path; set by edge queries.
}
|
|
|
|
// Where adds a new predicate for the ChannelMonitorDailyRollupQuery builder.
// Predicates are combined with AND when the query is built.
func (_q *ChannelMonitorDailyRollupQuery) Where(ps ...predicate.ChannelMonitorDailyRollup) *ChannelMonitorDailyRollupQuery {
	_q.predicates = append(_q.predicates, ps...)
	return _q
}

// Limit the number of records to be returned by this query.
func (_q *ChannelMonitorDailyRollupQuery) Limit(limit int) *ChannelMonitorDailyRollupQuery {
	_q.ctx.Limit = &limit
	return _q
}

// Offset to start from.
func (_q *ChannelMonitorDailyRollupQuery) Offset(offset int) *ChannelMonitorDailyRollupQuery {
	_q.ctx.Offset = &offset
	return _q
}

// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (_q *ChannelMonitorDailyRollupQuery) Unique(unique bool) *ChannelMonitorDailyRollupQuery {
	_q.ctx.Unique = &unique
	return _q
}

// Order specifies how the records should be ordered.
// Multiple calls append additional ORDER BY terms.
func (_q *ChannelMonitorDailyRollupQuery) Order(o ...channelmonitordailyrollup.OrderOption) *ChannelMonitorDailyRollupQuery {
	_q.order = append(_q.order, o...)
	return _q
}
|
|
|
|
// QueryMonitor chains the current query on the "monitor" edge.
// The returned ChannelMonitorQuery resolves its FROM-selector lazily (via
// query.path), so the current builder can still be mutated before execution.
func (_q *ChannelMonitorDailyRollupQuery) QueryMonitor() *ChannelMonitorQuery {
	query := (&ChannelMonitorClient{config: _q.config}).Query()
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := _q.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := _q.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		// M2O edge with inverse=true: rollup rows hold the monitor_id FK.
		step := sqlgraph.NewStep(
			sqlgraph.From(channelmonitordailyrollup.Table, channelmonitordailyrollup.FieldID, selector),
			sqlgraph.To(channelmonitor.Table, channelmonitor.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, channelmonitordailyrollup.MonitorTable, channelmonitordailyrollup.MonitorColumn),
		)
		fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}
|
|
|
|
// First returns the first ChannelMonitorDailyRollup entity from the query.
// Returns a *NotFoundError when no ChannelMonitorDailyRollup was found.
func (_q *ChannelMonitorDailyRollupQuery) First(ctx context.Context) (*ChannelMonitorDailyRollup, error) {
	// LIMIT 1 keeps the query cheap; the op tag lets interceptors distinguish First from All.
	nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
	if err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nil, &NotFoundError{channelmonitordailyrollup.Label}
	}
	return nodes[0], nil
}

// FirstX is like First, but panics if an error occurs.
// A not-found result is returned as nil rather than panicking.
func (_q *ChannelMonitorDailyRollupQuery) FirstX(ctx context.Context) *ChannelMonitorDailyRollup {
	node, err := _q.First(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return node
}

// FirstID returns the first ChannelMonitorDailyRollup ID from the query.
// Returns a *NotFoundError when no ChannelMonitorDailyRollup ID was found.
func (_q *ChannelMonitorDailyRollupQuery) FirstID(ctx context.Context) (id int64, err error) {
	var ids []int64
	if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
		return
	}
	if len(ids) == 0 {
		err = &NotFoundError{channelmonitordailyrollup.Label}
		return
	}
	return ids[0], nil
}

// FirstIDX is like FirstID, but panics if an error occurs.
// A not-found result is returned as the zero ID rather than panicking.
func (_q *ChannelMonitorDailyRollupQuery) FirstIDX(ctx context.Context) int64 {
	id, err := _q.FirstID(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return id
}
|
|
|
|
// Only returns a single ChannelMonitorDailyRollup entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one ChannelMonitorDailyRollup entity is found.
// Returns a *NotFoundError when no ChannelMonitorDailyRollup entities are found.
func (_q *ChannelMonitorDailyRollupQuery) Only(ctx context.Context) (*ChannelMonitorDailyRollup, error) {
	// LIMIT 2 is enough to detect a non-singular result without fetching everything.
	nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
	if err != nil {
		return nil, err
	}
	switch len(nodes) {
	case 1:
		return nodes[0], nil
	case 0:
		return nil, &NotFoundError{channelmonitordailyrollup.Label}
	default:
		return nil, &NotSingularError{channelmonitordailyrollup.Label}
	}
}

// OnlyX is like Only, but panics if an error occurs.
func (_q *ChannelMonitorDailyRollupQuery) OnlyX(ctx context.Context) *ChannelMonitorDailyRollup {
	node, err := _q.Only(ctx)
	if err != nil {
		panic(err)
	}
	return node
}

// OnlyID is like Only, but returns the only ChannelMonitorDailyRollup ID in the query.
// Returns a *NotSingularError when more than one ChannelMonitorDailyRollup ID is found.
// Returns a *NotFoundError when no entities are found.
func (_q *ChannelMonitorDailyRollupQuery) OnlyID(ctx context.Context) (id int64, err error) {
	var ids []int64
	if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
		return
	}
	switch len(ids) {
	case 1:
		id = ids[0]
	case 0:
		err = &NotFoundError{channelmonitordailyrollup.Label}
	default:
		err = &NotSingularError{channelmonitordailyrollup.Label}
	}
	return
}

// OnlyIDX is like OnlyID, but panics if an error occurs.
func (_q *ChannelMonitorDailyRollupQuery) OnlyIDX(ctx context.Context) int64 {
	id, err := _q.OnlyID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}
|
|
|
|
// All executes the query and returns a list of ChannelMonitorDailyRollups.
func (_q *ChannelMonitorDailyRollupQuery) All(ctx context.Context) ([]*ChannelMonitorDailyRollup, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
	if err := _q.prepareQuery(ctx); err != nil {
		return nil, err
	}
	// Route execution through the interceptor chain so Traverse/Intercept hooks
	// (including the soft-delete filter) apply before sqlAll runs.
	qr := querierAll[[]*ChannelMonitorDailyRollup, *ChannelMonitorDailyRollupQuery]()
	return withInterceptors[[]*ChannelMonitorDailyRollup](ctx, _q, qr, _q.inters)
}

// AllX is like All, but panics if an error occurs.
func (_q *ChannelMonitorDailyRollupQuery) AllX(ctx context.Context) []*ChannelMonitorDailyRollup {
	nodes, err := _q.All(ctx)
	if err != nil {
		panic(err)
	}
	return nodes
}

// IDs executes the query and returns a list of ChannelMonitorDailyRollup IDs.
func (_q *ChannelMonitorDailyRollupQuery) IDs(ctx context.Context) (ids []int64, err error) {
	// Default to DISTINCT for traversal-based queries unless the caller decided.
	if _q.ctx.Unique == nil && _q.path != nil {
		_q.Unique(true)
	}
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
	if err = _q.Select(channelmonitordailyrollup.FieldID).Scan(ctx, &ids); err != nil {
		return nil, err
	}
	return ids, nil
}

// IDsX is like IDs, but panics if an error occurs.
func (_q *ChannelMonitorDailyRollupQuery) IDsX(ctx context.Context) []int64 {
	ids, err := _q.IDs(ctx)
	if err != nil {
		panic(err)
	}
	return ids
}
|
|
|
|
// Count returns the count of the given query.
func (_q *ChannelMonitorDailyRollupQuery) Count(ctx context.Context) (int, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
	if err := _q.prepareQuery(ctx); err != nil {
		return 0, err
	}
	return withInterceptors[int](ctx, _q, querierCount[*ChannelMonitorDailyRollupQuery](), _q.inters)
}

// CountX is like Count, but panics if an error occurs.
func (_q *ChannelMonitorDailyRollupQuery) CountX(ctx context.Context) int {
	count, err := _q.Count(ctx)
	if err != nil {
		panic(err)
	}
	return count
}

// Exist returns true if the query has elements in the graph.
func (_q *ChannelMonitorDailyRollupQuery) Exist(ctx context.Context) (bool, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
	// FirstID is used as a cheap existence probe (LIMIT 1, id column only).
	switch _, err := _q.FirstID(ctx); {
	case IsNotFound(err):
		return false, nil
	case err != nil:
		return false, fmt.Errorf("ent: check existence: %w", err)
	default:
		return true, nil
	}
}

// ExistX is like Exist, but panics if an error occurs.
func (_q *ChannelMonitorDailyRollupQuery) ExistX(ctx context.Context) bool {
	exist, err := _q.Exist(ctx)
	if err != nil {
		panic(err)
	}
	return exist
}
|
|
|
|
// Clone returns a duplicate of the ChannelMonitorDailyRollupQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
// Slices are copied so later appends on one builder do not leak into the other.
func (_q *ChannelMonitorDailyRollupQuery) Clone() *ChannelMonitorDailyRollupQuery {
	if _q == nil {
		return nil
	}
	return &ChannelMonitorDailyRollupQuery{
		config:      _q.config,
		ctx:         _q.ctx.Clone(),
		order:       append([]channelmonitordailyrollup.OrderOption{}, _q.order...),
		inters:      append([]Interceptor{}, _q.inters...),
		predicates:  append([]predicate.ChannelMonitorDailyRollup{}, _q.predicates...),
		withMonitor: _q.withMonitor.Clone(),
		// clone intermediate query.
		sql:  _q.sql.Clone(),
		path: _q.path,
	}
}

// WithMonitor tells the query-builder to eager-load the nodes that are connected to
// the "monitor" edge. The optional arguments are used to configure the query builder of the edge.
func (_q *ChannelMonitorDailyRollupQuery) WithMonitor(opts ...func(*ChannelMonitorQuery)) *ChannelMonitorDailyRollupQuery {
	query := (&ChannelMonitorClient{config: _q.config}).Query()
	for _, opt := range opts {
		opt(query)
	}
	_q.withMonitor = query
	return _q
}
|
|
|
|
// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
// Example:
//
//	var v []struct {
//		DeletedAt time.Time `json:"deleted_at,omitempty"`
//		Count     int       `json:"count,omitempty"`
//	}
//
//	client.ChannelMonitorDailyRollup.Query().
//		GroupBy(channelmonitordailyrollup.FieldDeletedAt).
//		Aggregate(ent.Count()).
//		Scan(ctx, &v)
func (_q *ChannelMonitorDailyRollupQuery) GroupBy(field string, fields ...string) *ChannelMonitorDailyRollupGroupBy {
	_q.ctx.Fields = append([]string{field}, fields...)
	grbuild := &ChannelMonitorDailyRollupGroupBy{build: _q}
	grbuild.flds = &_q.ctx.Fields
	grbuild.label = channelmonitordailyrollup.Label
	grbuild.scan = grbuild.Scan
	return grbuild
}

// Select allows the selection one or more fields/columns for the given query,
// instead of selecting all fields in the entity.
//
// Example:
//
//	var v []struct {
//		DeletedAt time.Time `json:"deleted_at,omitempty"`
//	}
//
//	client.ChannelMonitorDailyRollup.Query().
//		Select(channelmonitordailyrollup.FieldDeletedAt).
//		Scan(ctx, &v)
func (_q *ChannelMonitorDailyRollupQuery) Select(fields ...string) *ChannelMonitorDailyRollupSelect {
	_q.ctx.Fields = append(_q.ctx.Fields, fields...)
	sbuild := &ChannelMonitorDailyRollupSelect{ChannelMonitorDailyRollupQuery: _q}
	sbuild.label = channelmonitordailyrollup.Label
	sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
	return sbuild
}

// Aggregate returns a ChannelMonitorDailyRollupSelect configured with the given aggregations.
func (_q *ChannelMonitorDailyRollupQuery) Aggregate(fns ...AggregateFunc) *ChannelMonitorDailyRollupSelect {
	return _q.Select().Aggregate(fns...)
}
|
|
|
|
// prepareQuery runs all Traverser interceptors, validates the selected field
// names, and resolves the lazy traversal path (if any) into _q.sql.
// It must be called before the query is translated to SQL.
func (_q *ChannelMonitorDailyRollupQuery) prepareQuery(ctx context.Context) error {
	for _, inter := range _q.inters {
		if inter == nil {
			// A nil interceptor usually means ent/runtime was not imported,
			// so schema-level interceptors were never registered.
			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
		}
		if trv, ok := inter.(Traverser); ok {
			if err := trv.Traverse(ctx, _q); err != nil {
				return err
			}
		}
	}
	for _, f := range _q.ctx.Fields {
		if !channelmonitordailyrollup.ValidColumn(f) {
			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
		}
	}
	if _q.path != nil {
		// Resolve the deferred traversal selector now that the builder is final.
		prev, err := _q.path(ctx)
		if err != nil {
			return err
		}
		_q.sql = prev
	}
	return nil
}
|
|
|
|
// sqlAll executes the query via sqlgraph and scans all matching rows into
// ChannelMonitorDailyRollup entities, then eager-loads the "monitor" edge
// when requested via WithMonitor. hooks may mutate the query spec before
// execution (used internally by ent).
func (_q *ChannelMonitorDailyRollupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*ChannelMonitorDailyRollup, error) {
	var (
		nodes = []*ChannelMonitorDailyRollup{}
		_spec = _q.querySpec()
		// One flag per edge; index 0 corresponds to the "monitor" edge.
		loadedTypes = [1]bool{
			_q.withMonitor != nil,
		}
	)
	_spec.ScanValues = func(columns []string) ([]any, error) {
		// Method-expression call: scanValues does not use its receiver, so nil is safe here.
		return (*ChannelMonitorDailyRollup).scanValues(nil, columns)
	}
	_spec.Assign = func(columns []string, values []any) error {
		node := &ChannelMonitorDailyRollup{config: _q.config}
		nodes = append(nodes, node)
		node.Edges.loadedTypes = loadedTypes
		return node.assignValues(columns, values)
	}
	if len(_q.modifiers) > 0 {
		_spec.Modifiers = _q.modifiers
	}
	for i := range hooks {
		hooks[i](ctx, _spec)
	}
	if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nodes, nil
	}
	if query := _q.withMonitor; query != nil {
		if err := _q.loadMonitor(ctx, query, nodes, nil,
			func(n *ChannelMonitorDailyRollup, e *ChannelMonitor) { n.Edges.Monitor = e }); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
|
|
|
|
// loadMonitor eager-loads the M2O "monitor" edge for the given rollup nodes:
// it collects the distinct monitor_id foreign keys, fetches the matching
// ChannelMonitor rows in a single query, and assigns each neighbor back to
// every node that references it. init is unused for M2O edges (single value).
func (_q *ChannelMonitorDailyRollupQuery) loadMonitor(ctx context.Context, query *ChannelMonitorQuery, nodes []*ChannelMonitorDailyRollup, init func(*ChannelMonitorDailyRollup), assign func(*ChannelMonitorDailyRollup, *ChannelMonitor)) error {
	ids := make([]int64, 0, len(nodes))
	// Group nodes by FK so each monitor is fetched once and fanned out to all referrers.
	nodeids := make(map[int64][]*ChannelMonitorDailyRollup)
	for i := range nodes {
		fk := nodes[i].MonitorID
		if _, ok := nodeids[fk]; !ok {
			ids = append(ids, fk)
		}
		nodeids[fk] = append(nodeids[fk], nodes[i])
	}
	if len(ids) == 0 {
		return nil
	}
	query.Where(channelmonitor.IDIn(ids...))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		nodes, ok := nodeids[n.ID]
		if !ok {
			return fmt.Errorf(`unexpected foreign-key "monitor_id" returned %v`, n.ID)
		}
		for i := range nodes {
			assign(nodes[i], n)
		}
	}
	return nil
}
|
|
|
|
// sqlCount builds the query spec and executes a COUNT over the matching rows.
// When specific fields are selected, Unique controls COUNT(DISTINCT ...).
func (_q *ChannelMonitorDailyRollupQuery) sqlCount(ctx context.Context) (int, error) {
	_spec := _q.querySpec()
	if len(_q.modifiers) > 0 {
		_spec.Modifiers = _q.modifiers
	}
	_spec.Node.Columns = _q.ctx.Fields
	if len(_q.ctx.Fields) > 0 {
		_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
	}
	return sqlgraph.CountNodes(ctx, _q.driver, _spec)
}
|
|
|
|
// querySpec translates the builder state (predicates, order, limit/offset,
// selected columns, uniqueness) into a sqlgraph.QuerySpec for execution.
func (_q *ChannelMonitorDailyRollupQuery) querySpec() *sqlgraph.QuerySpec {
	_spec := sqlgraph.NewQuerySpec(channelmonitordailyrollup.Table, channelmonitordailyrollup.Columns, sqlgraph.NewFieldSpec(channelmonitordailyrollup.FieldID, field.TypeInt64))
	_spec.From = _q.sql
	if unique := _q.ctx.Unique; unique != nil {
		_spec.Unique = *unique
	} else if _q.path != nil {
		// Traversal queries default to DISTINCT to avoid duplicate joined rows.
		_spec.Unique = true
	}
	if fields := _q.ctx.Fields; len(fields) > 0 {
		// The ID column is always selected first; other fields follow, de-duplicated.
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, channelmonitordailyrollup.FieldID)
		for i := range fields {
			if fields[i] != channelmonitordailyrollup.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
			}
		}
		if _q.withMonitor != nil {
			// Eager-loading the monitor edge requires the FK column even if not selected.
			_spec.Node.AddColumnOnce(channelmonitordailyrollup.FieldMonitorID)
		}
	}
	if ps := _q.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if limit := _q.ctx.Limit; limit != nil {
		_spec.Limit = *limit
	}
	if offset := _q.ctx.Offset; offset != nil {
		_spec.Offset = *offset
	}
	if ps := _q.order; len(ps) > 0 {
		_spec.Order = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	return _spec
}
|
|
|
|
// sqlQuery builds the raw *sql.Selector for this query, starting either from
// a fresh SELECT on the rollup table or from the traversal selector in _q.sql,
// then applying modifiers, predicates, ordering, and limit/offset.
func (_q *ChannelMonitorDailyRollupQuery) sqlQuery(ctx context.Context) *sql.Selector {
	builder := sql.Dialect(_q.driver.Dialect())
	t1 := builder.Table(channelmonitordailyrollup.Table)
	columns := _q.ctx.Fields
	if len(columns) == 0 {
		columns = channelmonitordailyrollup.Columns
	}
	selector := builder.Select(t1.Columns(columns...)...).From(t1)
	if _q.sql != nil {
		// Continue from the traversal selector instead of a fresh table scan.
		selector = _q.sql
		selector.Select(selector.Columns(columns...)...)
	}
	if _q.ctx.Unique != nil && *_q.ctx.Unique {
		selector.Distinct()
	}
	for _, m := range _q.modifiers {
		m(selector)
	}
	for _, p := range _q.predicates {
		p(selector)
	}
	for _, p := range _q.order {
		p(selector)
	}
	if offset := _q.ctx.Offset; offset != nil {
		// limit is mandatory for offset clause. We start
		// with default value, and override it below if needed.
		selector.Offset(*offset).Limit(math.MaxInt32)
	}
	if limit := _q.ctx.Limit; limit != nil {
		selector.Limit(*limit)
	}
	return selector
}
|
|
|
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
// either committed or rolled-back.
func (_q *ChannelMonitorDailyRollupQuery) ForUpdate(opts ...sql.LockOption) *ChannelMonitorDailyRollupQuery {
	if _q.driver.Dialect() == dialect.Postgres {
		// Postgres rejects SELECT DISTINCT ... FOR UPDATE, so disable uniqueness.
		_q.Unique(false)
	}
	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
		s.ForUpdate(opts...)
	})
	return _q
}

// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
// on any rows that are read. Other sessions can read the rows, but cannot modify them
// until your transaction commits.
func (_q *ChannelMonitorDailyRollupQuery) ForShare(opts ...sql.LockOption) *ChannelMonitorDailyRollupQuery {
	if _q.driver.Dialect() == dialect.Postgres {
		// Postgres rejects SELECT DISTINCT ... FOR SHARE, so disable uniqueness.
		_q.Unique(false)
	}
	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
		s.ForShare(opts...)
	})
	return _q
}
|
|
|
|
// ChannelMonitorDailyRollupGroupBy is the group-by builder for ChannelMonitorDailyRollup entities.
type ChannelMonitorDailyRollupGroupBy struct {
	selector                                   // embedded shared selector state (fields, aggregations, scan helpers).
	build    *ChannelMonitorDailyRollupQuery   // the underlying query this GROUP BY is built on.
}

// Aggregate adds the given aggregation functions to the group-by query.
func (_g *ChannelMonitorDailyRollupGroupBy) Aggregate(fns ...AggregateFunc) *ChannelMonitorDailyRollupGroupBy {
	_g.fns = append(_g.fns, fns...)
	return _g
}

// Scan applies the selector query and scans the result into the given value.
// v should be a pointer to a slice of structs matching the grouped columns.
func (_g *ChannelMonitorDailyRollupGroupBy) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
	if err := _g.build.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*ChannelMonitorDailyRollupQuery, *ChannelMonitorDailyRollupGroupBy](ctx, _g.build, _g, _g.build.inters, v)
}
|
|
|
|
// sqlScan builds the grouped SELECT (group-by columns plus aggregations),
// executes it, and scans the rows into v.
func (_g *ChannelMonitorDailyRollupGroupBy) sqlScan(ctx context.Context, root *ChannelMonitorDailyRollupQuery, v any) error {
	selector := root.sqlQuery(ctx).Select()
	aggregation := make([]string, 0, len(_g.fns))
	for _, fn := range _g.fns {
		aggregation = append(aggregation, fn(selector))
	}
	if len(selector.SelectedColumns()) == 0 {
		// No explicit selection yet: select the group-by fields followed by the aggregations.
		columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
		for _, f := range *_g.flds {
			columns = append(columns, selector.C(f))
		}
		columns = append(columns, aggregation...)
		selector.Select(columns...)
	}
	selector.GroupBy(selector.Columns(*_g.flds...)...)
	if err := selector.Err(); err != nil {
		return err
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
|
|
|
// ChannelMonitorDailyRollupSelect is the builder for selecting fields of ChannelMonitorDailyRollup entities.
type ChannelMonitorDailyRollupSelect struct {
	*ChannelMonitorDailyRollupQuery          // the underlying query whose fields are being selected.
	selector                                 // embedded shared selector state (fields, aggregations, scan helpers).
}

// Aggregate adds the given aggregation functions to the selector query.
func (_s *ChannelMonitorDailyRollupSelect) Aggregate(fns ...AggregateFunc) *ChannelMonitorDailyRollupSelect {
	_s.fns = append(_s.fns, fns...)
	return _s
}

// Scan applies the selector query and scans the result into the given value.
// v should be a pointer to a slice of structs matching the selected columns.
func (_s *ChannelMonitorDailyRollupSelect) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
	if err := _s.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*ChannelMonitorDailyRollupQuery, *ChannelMonitorDailyRollupSelect](ctx, _s.ChannelMonitorDailyRollupQuery, _s, _s.inters, v)
}
|
|
|
|
// sqlScan appends any aggregations to the selected columns (or selects only
// the aggregations when no fields were chosen), executes the query, and scans
// the rows into v.
func (_s *ChannelMonitorDailyRollupSelect) sqlScan(ctx context.Context, root *ChannelMonitorDailyRollupQuery, v any) error {
	selector := root.sqlQuery(ctx)
	aggregation := make([]string, 0, len(_s.fns))
	for _, fn := range _s.fns {
		aggregation = append(aggregation, fn(selector))
	}
	switch n := len(*_s.selector.flds); {
	case n == 0 && len(aggregation) > 0:
		// Aggregations only: replace the selection entirely.
		selector.Select(aggregation...)
	case n != 0 && len(aggregation) > 0:
		// Fields plus aggregations: append after the selected columns.
		selector.AppendSelect(aggregation...)
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _s.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|