Merge remote-tracking branch 'upstream/main'
@@ -543,6 +543,15 @@ func (r *accountRepository) SetError(ctx context.Context, id int64, errorMsg str
	return nil
}

func (r *accountRepository) ClearError(ctx context.Context, id int64) error {
	_, err := r.client.Account.Update().
		Where(dbaccount.IDEQ(id)).
		SetStatus(service.StatusActive).
		SetErrorMessage("").
		Save(ctx)
	return err
}

func (r *accountRepository) AddToGroup(ctx context.Context, accountID, groupID int64, priority int) error {
	_, err := r.client.AccountGroup.Create().
		SetAccountID(accountID).
@@ -960,7 +969,16 @@ func (r *accountRepository) UpdateSessionWindow(ctx context.Context, id int64, s
		builder.SetSessionWindowEnd(*end)
	}
	_, err := builder.Save(ctx)
	return err
	if err != nil {
		return err
	}
	// Trigger a scheduler cache update (only when the window times actually changed)
	if start != nil || end != nil {
		if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil {
			log.Printf("[SchedulerOutbox] enqueue session window update failed: account=%d err=%v", id, err)
		}
	}
	return nil
}

func (r *accountRepository) SetSchedulable(ctx context.Context, id int64, schedulable bool) error {
@@ -182,7 +182,9 @@ func (s *claudeOAuthService) ExchangeCodeForToken(ctx context.Context, code, cod

	resp, err := client.R().
		SetContext(ctx).
		SetHeader("Accept", "application/json, text/plain, */*").
		SetHeader("Content-Type", "application/json").
		SetHeader("User-Agent", "axios/1.8.4").
		SetBody(reqBody).
		SetSuccessResult(&tokenResp).
		Post(s.tokenURL)
@@ -205,8 +207,6 @@ func (s *claudeOAuthService) ExchangeCodeForToken(ctx context.Context, code, cod
func (s *claudeOAuthService) RefreshToken(ctx context.Context, refreshToken, proxyURL string) (*oauth.TokenResponse, error) {
	client := s.clientFactory(proxyURL)

	// Use JSON format (consistent with ExchangeCodeForToken)
	// The Anthropic OAuth API expects a JSON request body
	reqBody := map[string]any{
		"grant_type":    "refresh_token",
		"refresh_token": refreshToken,
@@ -217,7 +217,9 @@ func (s *claudeOAuthService) RefreshToken(ctx context.Context, refreshToken, pro

	resp, err := client.R().
		SetContext(ctx).
		SetHeader("Accept", "application/json, text/plain, */*").
		SetHeader("Content-Type", "application/json").
		SetHeader("User-Agent", "axios/1.8.4").
		SetBody(reqBody).
		SetSuccessResult(&tokenResp).
		Post(s.tokenURL)
@@ -171,7 +171,7 @@ func (s *ClaudeOAuthServiceSuite) TestGetAuthorizationCode() {
	s.client.baseURL = "http://in-process"
	s.client.clientFactory = func(string) *req.Client { return newTestReqClient(rt) }

	code, err := s.client.GetAuthorizationCode(context.Background(), "sess", "org-1", oauth.ScopeProfile, "cc", "st", "")
	code, err := s.client.GetAuthorizationCode(context.Background(), "sess", "org-1", oauth.ScopeInference, "cc", "st", "")

	if tt.wantErr {
		require.Error(s.T(), err)
@@ -14,37 +14,82 @@ import (

const defaultClaudeUsageURL = "https://api.anthropic.com/api/oauth/usage"

// Default User-Agent, matching the request captured from the client
const defaultUsageUserAgent = "claude-code/2.1.7"

type claudeUsageService struct {
	usageURL          string
	allowPrivateHosts bool
	httpUpstream      service.HTTPUpstream
}

func NewClaudeUsageFetcher() service.ClaudeUsageFetcher {
	return &claudeUsageService{usageURL: defaultClaudeUsageURL}
// NewClaudeUsageFetcher creates the Claude usage fetcher service.
// httpUpstream: optional; when provided, TLS fingerprint spoofing is supported.
func NewClaudeUsageFetcher(httpUpstream service.HTTPUpstream) service.ClaudeUsageFetcher {
	return &claudeUsageService{
		usageURL:     defaultClaudeUsageURL,
		httpUpstream: httpUpstream,
	}
}

// FetchUsage is the simple variant without TLS fingerprinting (kept for backward compatibility).
func (s *claudeUsageService) FetchUsage(ctx context.Context, accessToken, proxyURL string) (*service.ClaudeUsageResponse, error) {
	client, err := httpclient.GetClient(httpclient.Options{
		ProxyURL:           proxyURL,
		Timeout:            30 * time.Second,
		ValidateResolvedIP: true,
		AllowPrivateHosts:  s.allowPrivateHosts,
	return s.FetchUsageWithOptions(ctx, &service.ClaudeUsageFetchOptions{
		AccessToken: accessToken,
		ProxyURL:    proxyURL,
	})
	if err != nil {
		client = &http.Client{Timeout: 30 * time.Second}
	}

// FetchUsageWithOptions is the full variant with TLS fingerprinting and a custom User-Agent.
func (s *claudeUsageService) FetchUsageWithOptions(ctx context.Context, opts *service.ClaudeUsageFetchOptions) (*service.ClaudeUsageResponse, error) {
	if opts == nil {
		return nil, fmt.Errorf("options is nil")
	}

	// Build the request
	req, err := http.NewRequestWithContext(ctx, "GET", s.usageURL, nil)
	if err != nil {
		return nil, fmt.Errorf("create request failed: %w", err)
	}

	req.Header.Set("Authorization", "Bearer "+accessToken)
	// Set request headers (matching the captured request, but omit Accept-Encoding so Go handles compression itself)
	req.Header.Set("Accept", "application/json, text/plain, */*")
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+opts.AccessToken)
	req.Header.Set("anthropic-beta", "oauth-2025-04-20")

	resp, err := client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("request failed: %w", err)
	// Set the User-Agent (prefer the cached fingerprint, otherwise fall back to the default)
	userAgent := defaultUsageUserAgent
	if opts.Fingerprint != nil && opts.Fingerprint.UserAgent != "" {
		userAgent = opts.Fingerprint.UserAgent
	}
	req.Header.Set("User-Agent", userAgent)

	var resp *http.Response

	// When TLS fingerprinting is enabled and an HTTPUpstream is available, use DoWithTLS
	if opts.EnableTLSFingerprint && s.httpUpstream != nil {
		// Pass 0 for accountConcurrency to use the default pool settings; usage requests need no special concurrency
		resp, err = s.httpUpstream.DoWithTLS(req, opts.ProxyURL, opts.AccountID, 0, true)
		if err != nil {
			return nil, fmt.Errorf("request with TLS fingerprint failed: %w", err)
		}
	} else {
		// TLS fingerprinting disabled: use a plain HTTP client
		client, err := httpclient.GetClient(httpclient.Options{
			ProxyURL:           opts.ProxyURL,
			Timeout:            30 * time.Second,
			ValidateResolvedIP: true,
			AllowPrivateHosts:  s.allowPrivateHosts,
		})
		if err != nil {
			client = &http.Client{Timeout: 30 * time.Second}
		}

		resp, err = client.Do(req)
		if err != nil {
			return nil, fmt.Errorf("request failed: %w", err)
		}
	}
	defer func() { _ = resp.Body.Close() }()

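A minimal sketch of how a caller might drive the new options-based API; `fetcher`, `token`, and `account` are illustrative assumptions, not names from this diff:

	// Hypothetical caller; fetcher was created via NewClaudeUsageFetcher(httpUpstream).
	usage, err := fetcher.FetchUsageWithOptions(ctx, &service.ClaudeUsageFetchOptions{
		AccessToken:          token,         // OAuth access token for the account
		ProxyURL:             account.Proxy, // optional per-account proxy (assumed field)
		AccountID:            account.ID,
		EnableTLSFingerprint: true, // only takes effect when an HTTPUpstream was injected
	})
	if err != nil {
		log.Printf("fetch usage failed: %v", err)
	}
	_ = usage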
@@ -77,6 +77,75 @@ func (r *dashboardAggregationRepository) AggregateRange(ctx context.Context, sta
	return nil
}

func (r *dashboardAggregationRepository) RecomputeRange(ctx context.Context, start, end time.Time) error {
	if r == nil || r.sql == nil {
		return nil
	}
	loc := timezone.Location()
	startLocal := start.In(loc)
	endLocal := end.In(loc)
	if !endLocal.After(startLocal) {
		return nil
	}

	hourStart := startLocal.Truncate(time.Hour)
	hourEnd := endLocal.Truncate(time.Hour)
	if endLocal.After(hourEnd) {
		hourEnd = hourEnd.Add(time.Hour)
	}

	dayStart := truncateToDay(startLocal)
	dayEnd := truncateToDay(endLocal)
	if endLocal.After(dayEnd) {
		dayEnd = dayEnd.Add(24 * time.Hour)
	}

	// Prefer a transaction so the range stays consistent (degrade to non-transactional execution when the executor is not a *sql.DB).
	if db, ok := r.sql.(*sql.DB); ok {
		tx, err := db.BeginTx(ctx, nil)
		if err != nil {
			return err
		}
		txRepo := newDashboardAggregationRepositoryWithSQL(tx)
		if err := txRepo.recomputeRangeInTx(ctx, hourStart, hourEnd, dayStart, dayEnd); err != nil {
			_ = tx.Rollback()
			return err
		}
		return tx.Commit()
	}
	return r.recomputeRangeInTx(ctx, hourStart, hourEnd, dayStart, dayEnd)
}

func (r *dashboardAggregationRepository) recomputeRangeInTx(ctx context.Context, hourStart, hourEnd, dayStart, dayEnd time.Time) error {
	// Clear the buckets in range first, then rebuild (incremental inserts alone cannot roll back metrics such as active users).
	if _, err := r.sql.ExecContext(ctx, "DELETE FROM usage_dashboard_hourly WHERE bucket_start >= $1 AND bucket_start < $2", hourStart, hourEnd); err != nil {
		return err
	}
	if _, err := r.sql.ExecContext(ctx, "DELETE FROM usage_dashboard_hourly_users WHERE bucket_start >= $1 AND bucket_start < $2", hourStart, hourEnd); err != nil {
		return err
	}
	if _, err := r.sql.ExecContext(ctx, "DELETE FROM usage_dashboard_daily WHERE bucket_date >= $1::date AND bucket_date < $2::date", dayStart, dayEnd); err != nil {
		return err
	}
	if _, err := r.sql.ExecContext(ctx, "DELETE FROM usage_dashboard_daily_users WHERE bucket_date >= $1::date AND bucket_date < $2::date", dayStart, dayEnd); err != nil {
		return err
	}

	if err := r.insertHourlyActiveUsers(ctx, hourStart, hourEnd); err != nil {
		return err
	}
	if err := r.insertDailyActiveUsers(ctx, hourStart, hourEnd); err != nil {
		return err
	}
	if err := r.upsertHourlyAggregates(ctx, hourStart, hourEnd); err != nil {
		return err
	}
	if err := r.upsertDailyAggregates(ctx, dayStart, dayEnd); err != nil {
		return err
	}
	return nil
}

func (r *dashboardAggregationRepository) GetAggregationWatermark(ctx context.Context) (time.Time, error) {
	var ts time.Time
	query := "SELECT last_aggregated_at FROM usage_dashboard_aggregation_watermark WHERE id = 1"

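A minimal sketch of how a backfill caller might use RecomputeRange; `aggRepo` and the 24-hour window are illustrative assumptions:

	// Recompute yesterday's hourly and daily buckets; the repository truncates
	// to hour/day boundaries and wraps the rebuild in a transaction when it can.
	end := time.Now()
	start := end.Add(-24 * time.Hour)
	if err := aggRepo.RecomputeRange(ctx, start, end); err != nil {
		log.Printf("recompute dashboard range failed: %v", err)
	}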
@@ -4,7 +4,7 @@ import (
	"errors"
	"fmt"
	"io"
	"log"
	"log/slog"
	"net"
	"net/http"
	"net/url"
@@ -18,16 +18,8 @@ import (
	"github.com/Wei-Shaw/sub2api/internal/pkg/tlsfingerprint"
	"github.com/Wei-Shaw/sub2api/internal/service"
	"github.com/Wei-Shaw/sub2api/internal/util/urlvalidator"
	"github.com/gin-gonic/gin"
)

// debugLog prints log only in non-release mode.
func debugLog(format string, v ...any) {
	if gin.Mode() != gin.ReleaseMode {
		log.Printf(format, v...)
	}
}

// Default configuration constants.
// These values are used as fallback defaults when the config file does not specify them.
const (
@@ -189,7 +181,7 @@ func (s *httpUpstreamService) DoWithTLS(req *http.Request, proxyURL string, acco
	if proxyURL != "" {
		proxyInfo = proxyURL
	}
	debugLog("[TLS Fingerprint] Account %d: TLS fingerprint ENABLED, target=%s, proxy=%s", accountID, targetHost, proxyInfo)
	slog.Debug("tls_fingerprint_enabled", "account_id", accountID, "target", targetHost, "proxy", proxyInfo)

	if err := s.validateRequestHost(req); err != nil {
		return nil, err
@@ -200,16 +192,16 @@ func (s *httpUpstreamService) DoWithTLS(req *http.Request, proxyURL string, acco
	profile := registry.GetProfileByAccountID(accountID)
	if profile == nil {
		// If no profile is available, fall back to a standard request
		debugLog("[TLS Fingerprint] Account %d: WARNING - no profile found, falling back to standard request", accountID)
		slog.Debug("tls_fingerprint_no_profile", "account_id", accountID, "fallback", "standard_request")
		return s.Do(req, proxyURL, accountID, accountConcurrency)
	}

	debugLog("[TLS Fingerprint] Account %d: Using profile '%s' (GREASE=%v)", accountID, profile.Name, profile.EnableGREASE)
	slog.Debug("tls_fingerprint_using_profile", "account_id", accountID, "profile", profile.Name, "grease", profile.EnableGREASE)

	// Get or create a client carrying the TLS fingerprint
	entry, err := s.acquireClientWithTLS(proxyURL, accountID, accountConcurrency, profile)
	if err != nil {
		debugLog("[TLS Fingerprint] Account %d: Failed to acquire TLS client: %v", accountID, err)
		slog.Debug("tls_fingerprint_acquire_client_failed", "account_id", accountID, "error", err)
		return nil, err
	}

@@ -219,11 +211,11 @@ func (s *httpUpstreamService) DoWithTLS(req *http.Request, proxyURL string, acco
		// Request failed: decrement the in-flight counter immediately
		atomic.AddInt64(&entry.inFlight, -1)
		atomic.StoreInt64(&entry.lastUsed, time.Now().UnixNano())
		debugLog("[TLS Fingerprint] Account %d: Request FAILED: %v", accountID, err)
		slog.Debug("tls_fingerprint_request_failed", "account_id", accountID, "error", err)
		return nil, err
	}

	debugLog("[TLS Fingerprint] Account %d: Request SUCCESS, status=%d", accountID, resp.StatusCode)
	slog.Debug("tls_fingerprint_request_success", "account_id", accountID, "status", resp.StatusCode)

	// Wrap the response body so that closing it decrements the counter and refreshes the timestamp
	resp.Body = wrapTrackedBody(resp.Body, func() {
@@ -259,7 +251,7 @@ func (s *httpUpstreamService) getClientEntryWithTLS(proxyURL string, accountID i
			atomic.AddInt64(&entry.inFlight, 1)
		}
		s.mu.RUnlock()
		debugLog("[TLS Fingerprint] Account %d: Reusing existing TLS client (cacheKey=%s)", accountID, cacheKey)
		slog.Debug("tls_fingerprint_reusing_client", "account_id", accountID, "cache_key", cacheKey)
		return entry, nil
	}
	s.mu.RUnlock()
@@ -273,11 +265,14 @@ func (s *httpUpstreamService) getClientEntryWithTLS(proxyURL string, accountID i
			atomic.AddInt64(&entry.inFlight, 1)
		}
		s.mu.Unlock()
		debugLog("[TLS Fingerprint] Account %d: Reusing existing TLS client (cacheKey=%s)", accountID, cacheKey)
		slog.Debug("tls_fingerprint_reusing_client", "account_id", accountID, "cache_key", cacheKey)
		return entry, nil
	}
	debugLog("[TLS Fingerprint] Account %d: Evicting stale TLS client (cacheKey=%s, proxyChanged=%v, poolChanged=%v)",
		accountID, cacheKey, entry.proxyKey != proxyKey, entry.poolKey != poolKey)
	slog.Debug("tls_fingerprint_evicting_stale_client",
		"account_id", accountID,
		"cache_key", cacheKey,
		"proxy_changed", entry.proxyKey != proxyKey,
		"pool_changed", entry.poolKey != poolKey)
	s.removeClientLocked(cacheKey, entry)
	}

@@ -293,8 +288,7 @@ func (s *httpUpstreamService) getClientEntryWithTLS(proxyURL string, accountID i
	}

	// Build a Transport carrying the TLS fingerprint
	debugLog("[TLS Fingerprint] Account %d: Creating NEW TLS fingerprint client (cacheKey=%s, proxy=%s)",
		accountID, cacheKey, proxyKey)
	slog.Debug("tls_fingerprint_creating_new_client", "account_id", accountID, "cache_key", cacheKey, "proxy", proxyKey)
	settings := s.resolvePoolSettings(isolation, accountConcurrency)
	transport, err := buildUpstreamTransportWithTLSFingerprint(settings, parsedProxy, profile)
	if err != nil {
@@ -822,7 +816,7 @@ func buildUpstreamTransportWithTLSFingerprint(settings poolSettings, proxyURL *u
	// Pick the appropriate TLS fingerprint dialer based on the proxy type
	if proxyURL == nil {
		// Direct connection: use the TLSFingerprintDialer
		debugLog("[TLS Fingerprint Transport] Using DIRECT TLS dialer (no proxy)")
		slog.Debug("tls_fingerprint_transport_direct")
		dialer := tlsfingerprint.NewDialer(profile, nil)
		transport.DialTLSContext = dialer.DialTLSContext
	} else {
@@ -830,17 +824,17 @@ func buildUpstreamTransportWithTLSFingerprint(settings poolSettings, proxyURL *u
		switch scheme {
		case "socks5", "socks5h":
			// SOCKS5 proxy: use the SOCKS5ProxyDialer
			debugLog("[TLS Fingerprint Transport] Using SOCKS5 TLS dialer (proxy=%s)", proxyURL.Host)
			slog.Debug("tls_fingerprint_transport_socks5", "proxy", proxyURL.Host)
			socks5Dialer := tlsfingerprint.NewSOCKS5ProxyDialer(profile, proxyURL)
			transport.DialTLSContext = socks5Dialer.DialTLSContext
		case "http", "https":
			// HTTP/HTTPS proxy: use the HTTPProxyDialer (CONNECT tunnel)
			debugLog("[TLS Fingerprint Transport] Using HTTP CONNECT TLS dialer (proxy=%s)", proxyURL.Host)
			slog.Debug("tls_fingerprint_transport_http_connect", "proxy", proxyURL.Host)
			httpDialer := tlsfingerprint.NewHTTPProxyDialer(profile, proxyURL)
			transport.DialTLSContext = httpDialer.DialTLSContext
		default:
			// Unknown proxy type: fall back to the standard proxy config (no TLS fingerprint)
			debugLog("[TLS Fingerprint Transport] WARNING: Unknown proxy scheme '%s', falling back to standard proxy (NO TLS fingerprint)", scheme)
			slog.Debug("tls_fingerprint_transport_unknown_scheme_fallback", "scheme", scheme)
			if err := proxyutil.ConfigureTransportProxy(transport, proxyURL); err != nil {
				return nil, err
			}

@@ -11,8 +11,10 @@ import (
)

const (
	fingerprintKeyPrefix = "fingerprint:"
	fingerprintTTL       = 24 * time.Hour
	fingerprintKeyPrefix   = "fingerprint:"
	fingerprintTTL         = 24 * time.Hour
	maskedSessionKeyPrefix = "masked_session:"
	maskedSessionTTL       = 15 * time.Minute
)

// fingerprintKey generates the Redis key for account fingerprint cache.
@@ -20,6 +22,11 @@ func fingerprintKey(accountID int64) string {
	return fmt.Sprintf("%s%d", fingerprintKeyPrefix, accountID)
}

// maskedSessionKey generates the Redis key for masked session ID cache.
func maskedSessionKey(accountID int64) string {
	return fmt.Sprintf("%s%d", maskedSessionKeyPrefix, accountID)
}

type identityCache struct {
	rdb *redis.Client
}
@@ -49,3 +56,20 @@ func (c *identityCache) SetFingerprint(ctx context.Context, accountID int64, fp
	}
	return c.rdb.Set(ctx, key, val, fingerprintTTL).Err()
}

func (c *identityCache) GetMaskedSessionID(ctx context.Context, accountID int64) (string, error) {
	key := maskedSessionKey(accountID)
	val, err := c.rdb.Get(ctx, key).Result()
	if err != nil {
		if err == redis.Nil {
			return "", nil
		}
		return "", err
	}
	return val, nil
}

func (c *identityCache) SetMaskedSessionID(ctx context.Context, accountID int64, sessionID string) error {
	key := maskedSessionKey(accountID)
	return c.rdb.Set(ctx, key, sessionID, maskedSessionTTL).Err()
}

@@ -217,7 +217,7 @@ func (c *sessionLimitCache) GetActiveSessionCount(ctx context.Context, accountID
}

// GetActiveSessionCountBatch fetches the active session counts for multiple accounts in one batch
func (c *sessionLimitCache) GetActiveSessionCountBatch(ctx context.Context, accountIDs []int64) (map[int64]int, error) {
func (c *sessionLimitCache) GetActiveSessionCountBatch(ctx context.Context, accountIDs []int64, idleTimeouts map[int64]time.Duration) (map[int64]int, error) {
	if len(accountIDs) == 0 {
		return make(map[int64]int), nil
	}
@@ -226,11 +226,18 @@ func (c *sessionLimitCache) GetActiveSessionCountBatch(ctx context.Context, acco

	// Execute the lookups in one batch via a pipeline
	pipe := c.rdb.Pipeline()
	idleTimeoutSeconds := int(c.defaultIdleTimeout.Seconds())

	cmds := make(map[int64]*redis.Cmd, len(accountIDs))
	for _, accountID := range accountIDs {
		key := sessionLimitKey(accountID)
		// Use each account's own idleTimeout, falling back to the default when absent
		idleTimeout := c.defaultIdleTimeout
		if idleTimeouts != nil {
			if t, ok := idleTimeouts[accountID]; ok && t > 0 {
				idleTimeout = t
			}
		}
		idleTimeoutSeconds := int(idleTimeout.Seconds())
		cmds[accountID] = getActiveSessionCountScript.Run(ctx, pipe, []string{key}, idleTimeoutSeconds)
	}

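A minimal sketch of how a caller might adopt the new batch signature; the `accounts` slice and its `SessionIdleTimeout` field are illustrative assumptions:

	// Build the per-account idle-timeout map before the batch lookup (field names are hypothetical).
	idleTimeouts := make(map[int64]time.Duration, len(accounts))
	ids := make([]int64, 0, len(accounts))
	for _, a := range accounts {
		ids = append(ids, a.ID)
		if a.SessionIdleTimeout > 0 {
			idleTimeouts[a.ID] = a.SessionIdleTimeout
		}
	}
	counts, err := cache.GetActiveSessionCountBatch(ctx, ids, idleTimeouts)
	if err != nil {
		return err
	}
	_ = counts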
backend/internal/repository/usage_cleanup_repo.go (new file, 551 lines)
@@ -0,0 +1,551 @@
package repository

import (
	"context"
	"database/sql"
	"encoding/json"
	"errors"
	"fmt"
	"strings"
	"time"

	dbent "github.com/Wei-Shaw/sub2api/ent"
	dbusagecleanuptask "github.com/Wei-Shaw/sub2api/ent/usagecleanuptask"
	"github.com/Wei-Shaw/sub2api/internal/pkg/pagination"
	"github.com/Wei-Shaw/sub2api/internal/service"
)

type usageCleanupRepository struct {
	client *dbent.Client
	sql    sqlExecutor
}

func NewUsageCleanupRepository(client *dbent.Client, sqlDB *sql.DB) service.UsageCleanupRepository {
	return newUsageCleanupRepositoryWithSQL(client, sqlDB)
}

func newUsageCleanupRepositoryWithSQL(client *dbent.Client, sqlq sqlExecutor) *usageCleanupRepository {
	return &usageCleanupRepository{client: client, sql: sqlq}
}

func (r *usageCleanupRepository) CreateTask(ctx context.Context, task *service.UsageCleanupTask) error {
	if task == nil {
		return nil
	}
	if r.client != nil {
		return r.createTaskWithEnt(ctx, task)
	}
	return r.createTaskWithSQL(ctx, task)
}

func (r *usageCleanupRepository) ListTasks(ctx context.Context, params pagination.PaginationParams) ([]service.UsageCleanupTask, *pagination.PaginationResult, error) {
	if r.client != nil {
		return r.listTasksWithEnt(ctx, params)
	}
	var total int64
	if err := scanSingleRow(ctx, r.sql, "SELECT COUNT(*) FROM usage_cleanup_tasks", nil, &total); err != nil {
		return nil, nil, err
	}
	if total == 0 {
		return []service.UsageCleanupTask{}, paginationResultFromTotal(0, params), nil
	}

	query := `
		SELECT id, status, filters, created_by, deleted_rows, error_message,
		       canceled_by, canceled_at,
		       started_at, finished_at, created_at, updated_at
		FROM usage_cleanup_tasks
		ORDER BY created_at DESC, id DESC
		LIMIT $1 OFFSET $2
	`
	rows, err := r.sql.QueryContext(ctx, query, params.Limit(), params.Offset())
	if err != nil {
		return nil, nil, err
	}
	defer func() { _ = rows.Close() }()

	tasks := make([]service.UsageCleanupTask, 0)
	for rows.Next() {
		var task service.UsageCleanupTask
		var filtersJSON []byte
		var errMsg sql.NullString
		var canceledBy sql.NullInt64
		var canceledAt sql.NullTime
		var startedAt sql.NullTime
		var finishedAt sql.NullTime
		if err := rows.Scan(
			&task.ID,
			&task.Status,
			&filtersJSON,
			&task.CreatedBy,
			&task.DeletedRows,
			&errMsg,
			&canceledBy,
			&canceledAt,
			&startedAt,
			&finishedAt,
			&task.CreatedAt,
			&task.UpdatedAt,
		); err != nil {
			return nil, nil, err
		}
		if err := json.Unmarshal(filtersJSON, &task.Filters); err != nil {
			return nil, nil, fmt.Errorf("parse cleanup filters: %w", err)
		}
		if errMsg.Valid {
			task.ErrorMsg = &errMsg.String
		}
		if canceledBy.Valid {
			v := canceledBy.Int64
			task.CanceledBy = &v
		}
		if canceledAt.Valid {
			task.CanceledAt = &canceledAt.Time
		}
		if startedAt.Valid {
			task.StartedAt = &startedAt.Time
		}
		if finishedAt.Valid {
			task.FinishedAt = &finishedAt.Time
		}
		tasks = append(tasks, task)
	}
	if err := rows.Err(); err != nil {
		return nil, nil, err
	}
	return tasks, paginationResultFromTotal(total, params), nil
}

func (r *usageCleanupRepository) ClaimNextPendingTask(ctx context.Context, staleRunningAfterSeconds int64) (*service.UsageCleanupTask, error) {
	if staleRunningAfterSeconds <= 0 {
		staleRunningAfterSeconds = 1800
	}
	query := `
		WITH next AS (
			SELECT id
			FROM usage_cleanup_tasks
			WHERE status = $1
			   OR (
				status = $2
				AND started_at IS NOT NULL
				AND started_at < NOW() - ($3 * interval '1 second')
			   )
			ORDER BY created_at ASC
			LIMIT 1
			FOR UPDATE SKIP LOCKED
		)
		UPDATE usage_cleanup_tasks AS tasks
		SET status = $4,
		    started_at = NOW(),
		    finished_at = NULL,
		    error_message = NULL,
		    updated_at = NOW()
		FROM next
		WHERE tasks.id = next.id
		RETURNING tasks.id, tasks.status, tasks.filters, tasks.created_by, tasks.deleted_rows, tasks.error_message,
		          tasks.started_at, tasks.finished_at, tasks.created_at, tasks.updated_at
	`
	var task service.UsageCleanupTask
	var filtersJSON []byte
	var errMsg sql.NullString
	var startedAt sql.NullTime
	var finishedAt sql.NullTime
	if err := scanSingleRow(
		ctx,
		r.sql,
		query,
		[]any{
			service.UsageCleanupStatusPending,
			service.UsageCleanupStatusRunning,
			staleRunningAfterSeconds,
			service.UsageCleanupStatusRunning,
		},
		&task.ID,
		&task.Status,
		&filtersJSON,
		&task.CreatedBy,
		&task.DeletedRows,
		&errMsg,
		&startedAt,
		&finishedAt,
		&task.CreatedAt,
		&task.UpdatedAt,
	); err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			return nil, nil
		}
		return nil, err
	}
	if err := json.Unmarshal(filtersJSON, &task.Filters); err != nil {
		return nil, fmt.Errorf("parse cleanup filters: %w", err)
	}
	if errMsg.Valid {
		task.ErrorMsg = &errMsg.String
	}
	if startedAt.Valid {
		task.StartedAt = &startedAt.Time
	}
	if finishedAt.Valid {
		task.FinishedAt = &finishedAt.Time
	}
	return &task, nil
}

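A minimal sketch of how a background worker might consume these methods; the polling interval, batch size, and loop structure are assumptions, not part of this diff:

	// Hypothetical worker loop; repo implements service.UsageCleanupRepository.
	for {
		task, err := repo.ClaimNextPendingTask(ctx, 1800)
		if err != nil || task == nil {
			time.Sleep(30 * time.Second) // nothing claimable yet, or a transient error
			continue
		}
		var deleted int64
		for {
			n, err := repo.DeleteUsageLogsBatch(ctx, task.Filters, 1000)
			if err != nil {
				_ = repo.MarkTaskFailed(ctx, task.ID, deleted, err.Error())
				break
			}
			deleted += n
			_ = repo.UpdateTaskProgress(ctx, task.ID, deleted)
			if n == 0 {
				_ = repo.MarkTaskSucceeded(ctx, task.ID, deleted)
				break
			}
		}
	}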
func (r *usageCleanupRepository) GetTaskStatus(ctx context.Context, taskID int64) (string, error) {
	if r.client != nil {
		return r.getTaskStatusWithEnt(ctx, taskID)
	}
	var status string
	if err := scanSingleRow(ctx, r.sql, "SELECT status FROM usage_cleanup_tasks WHERE id = $1", []any{taskID}, &status); err != nil {
		return "", err
	}
	return status, nil
}

func (r *usageCleanupRepository) UpdateTaskProgress(ctx context.Context, taskID int64, deletedRows int64) error {
	if r.client != nil {
		return r.updateTaskProgressWithEnt(ctx, taskID, deletedRows)
	}
	query := `
		UPDATE usage_cleanup_tasks
		SET deleted_rows = $1,
		    updated_at = NOW()
		WHERE id = $2
	`
	_, err := r.sql.ExecContext(ctx, query, deletedRows, taskID)
	return err
}

func (r *usageCleanupRepository) CancelTask(ctx context.Context, taskID int64, canceledBy int64) (bool, error) {
	if r.client != nil {
		return r.cancelTaskWithEnt(ctx, taskID, canceledBy)
	}
	query := `
		UPDATE usage_cleanup_tasks
		SET status = $1,
		    canceled_by = $3,
		    canceled_at = NOW(),
		    finished_at = NOW(),
		    error_message = NULL,
		    updated_at = NOW()
		WHERE id = $2
		  AND status IN ($4, $5)
		RETURNING id
	`
	var id int64
	err := scanSingleRow(ctx, r.sql, query, []any{
		service.UsageCleanupStatusCanceled,
		taskID,
		canceledBy,
		service.UsageCleanupStatusPending,
		service.UsageCleanupStatusRunning,
	}, &id)
	if errors.Is(err, sql.ErrNoRows) {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return true, nil
}

func (r *usageCleanupRepository) MarkTaskSucceeded(ctx context.Context, taskID int64, deletedRows int64) error {
	if r.client != nil {
		return r.markTaskSucceededWithEnt(ctx, taskID, deletedRows)
	}
	query := `
		UPDATE usage_cleanup_tasks
		SET status = $1,
		    deleted_rows = $2,
		    finished_at = NOW(),
		    updated_at = NOW()
		WHERE id = $3
	`
	_, err := r.sql.ExecContext(ctx, query, service.UsageCleanupStatusSucceeded, deletedRows, taskID)
	return err
}

func (r *usageCleanupRepository) MarkTaskFailed(ctx context.Context, taskID int64, deletedRows int64, errorMsg string) error {
	if r.client != nil {
		return r.markTaskFailedWithEnt(ctx, taskID, deletedRows, errorMsg)
	}
	query := `
		UPDATE usage_cleanup_tasks
		SET status = $1,
		    deleted_rows = $2,
		    error_message = $3,
		    finished_at = NOW(),
		    updated_at = NOW()
		WHERE id = $4
	`
	_, err := r.sql.ExecContext(ctx, query, service.UsageCleanupStatusFailed, deletedRows, errorMsg, taskID)
	return err
}

func (r *usageCleanupRepository) DeleteUsageLogsBatch(ctx context.Context, filters service.UsageCleanupFilters, limit int) (int64, error) {
	if filters.StartTime.IsZero() || filters.EndTime.IsZero() {
		return 0, fmt.Errorf("cleanup filters missing time range")
	}
	whereClause, args := buildUsageCleanupWhere(filters)
	if whereClause == "" {
		return 0, fmt.Errorf("cleanup filters missing time range")
	}
	args = append(args, limit)
	query := fmt.Sprintf(`
		WITH target AS (
			SELECT id
			FROM usage_logs
			WHERE %s
			ORDER BY created_at ASC, id ASC
			LIMIT $%d
		)
		DELETE FROM usage_logs
		WHERE id IN (SELECT id FROM target)
		RETURNING id
	`, whereClause, len(args))

	rows, err := r.sql.QueryContext(ctx, query, args...)
	if err != nil {
		return 0, err
	}
	defer func() { _ = rows.Close() }()

	var deleted int64
	for rows.Next() {
		deleted++
	}
	if err := rows.Err(); err != nil {
		return 0, err
	}
	return deleted, nil
}

func buildUsageCleanupWhere(filters service.UsageCleanupFilters) (string, []any) {
	conditions := make([]string, 0, 8)
	args := make([]any, 0, 8)
	idx := 1
	if !filters.StartTime.IsZero() {
		conditions = append(conditions, fmt.Sprintf("created_at >= $%d", idx))
		args = append(args, filters.StartTime)
		idx++
	}
	if !filters.EndTime.IsZero() {
		conditions = append(conditions, fmt.Sprintf("created_at <= $%d", idx))
		args = append(args, filters.EndTime)
		idx++
	}
	if filters.UserID != nil {
		conditions = append(conditions, fmt.Sprintf("user_id = $%d", idx))
		args = append(args, *filters.UserID)
		idx++
	}
	if filters.APIKeyID != nil {
		conditions = append(conditions, fmt.Sprintf("api_key_id = $%d", idx))
		args = append(args, *filters.APIKeyID)
		idx++
	}
	if filters.AccountID != nil {
		conditions = append(conditions, fmt.Sprintf("account_id = $%d", idx))
		args = append(args, *filters.AccountID)
		idx++
	}
	if filters.GroupID != nil {
		conditions = append(conditions, fmt.Sprintf("group_id = $%d", idx))
		args = append(args, *filters.GroupID)
		idx++
	}
	if filters.Model != nil {
		model := strings.TrimSpace(*filters.Model)
		if model != "" {
			conditions = append(conditions, fmt.Sprintf("model = $%d", idx))
			args = append(args, model)
			idx++
		}
	}
	if filters.Stream != nil {
		conditions = append(conditions, fmt.Sprintf("stream = $%d", idx))
		args = append(args, *filters.Stream)
		idx++
	}
	if filters.BillingType != nil {
		conditions = append(conditions, fmt.Sprintf("billing_type = $%d", idx))
		args = append(args, *filters.BillingType)
	}
	return strings.Join(conditions, " AND "), args
}

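For reference, a minimal sketch of what the builder produces for a time range plus a user filter (the variable names and values are illustrative):

	uid := int64(42)
	where, args := buildUsageCleanupWhere(service.UsageCleanupFilters{
		StartTime: start, // e.g. 2024-01-01T00:00:00Z
		EndTime:   end,   // e.g. 2024-01-02T00:00:00Z
		UserID:    &uid,
	})
	// where == "created_at >= $1 AND created_at <= $2 AND user_id = $3"
	// args  == []any{start, end, int64(42)}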
func (r *usageCleanupRepository) createTaskWithEnt(ctx context.Context, task *service.UsageCleanupTask) error {
	client := clientFromContext(ctx, r.client)
	filtersJSON, err := json.Marshal(task.Filters)
	if err != nil {
		return fmt.Errorf("marshal cleanup filters: %w", err)
	}
	created, err := client.UsageCleanupTask.
		Create().
		SetStatus(task.Status).
		SetFilters(json.RawMessage(filtersJSON)).
		SetCreatedBy(task.CreatedBy).
		SetDeletedRows(task.DeletedRows).
		Save(ctx)
	if err != nil {
		return err
	}
	task.ID = created.ID
	task.CreatedAt = created.CreatedAt
	task.UpdatedAt = created.UpdatedAt
	return nil
}

func (r *usageCleanupRepository) createTaskWithSQL(ctx context.Context, task *service.UsageCleanupTask) error {
	filtersJSON, err := json.Marshal(task.Filters)
	if err != nil {
		return fmt.Errorf("marshal cleanup filters: %w", err)
	}
	query := `
		INSERT INTO usage_cleanup_tasks (
			status,
			filters,
			created_by,
			deleted_rows
		) VALUES ($1, $2, $3, $4)
		RETURNING id, created_at, updated_at
	`
	if err := scanSingleRow(ctx, r.sql, query, []any{task.Status, filtersJSON, task.CreatedBy, task.DeletedRows}, &task.ID, &task.CreatedAt, &task.UpdatedAt); err != nil {
		return err
	}
	return nil
}

func (r *usageCleanupRepository) listTasksWithEnt(ctx context.Context, params pagination.PaginationParams) ([]service.UsageCleanupTask, *pagination.PaginationResult, error) {
	client := clientFromContext(ctx, r.client)
	query := client.UsageCleanupTask.Query()
	total, err := query.Clone().Count(ctx)
	if err != nil {
		return nil, nil, err
	}
	if total == 0 {
		return []service.UsageCleanupTask{}, paginationResultFromTotal(0, params), nil
	}
	rows, err := query.
		Order(dbent.Desc(dbusagecleanuptask.FieldCreatedAt), dbent.Desc(dbusagecleanuptask.FieldID)).
		Offset(params.Offset()).
		Limit(params.Limit()).
		All(ctx)
	if err != nil {
		return nil, nil, err
	}
	tasks := make([]service.UsageCleanupTask, 0, len(rows))
	for _, row := range rows {
		task, err := usageCleanupTaskFromEnt(row)
		if err != nil {
			return nil, nil, err
		}
		tasks = append(tasks, task)
	}
	return tasks, paginationResultFromTotal(int64(total), params), nil
}

func (r *usageCleanupRepository) getTaskStatusWithEnt(ctx context.Context, taskID int64) (string, error) {
	client := clientFromContext(ctx, r.client)
	task, err := client.UsageCleanupTask.Query().
		Where(dbusagecleanuptask.IDEQ(taskID)).
		Only(ctx)
	if err != nil {
		if dbent.IsNotFound(err) {
			return "", sql.ErrNoRows
		}
		return "", err
	}
	return task.Status, nil
}

func (r *usageCleanupRepository) updateTaskProgressWithEnt(ctx context.Context, taskID int64, deletedRows int64) error {
	client := clientFromContext(ctx, r.client)
	now := time.Now()
	_, err := client.UsageCleanupTask.Update().
		Where(dbusagecleanuptask.IDEQ(taskID)).
		SetDeletedRows(deletedRows).
		SetUpdatedAt(now).
		Save(ctx)
	return err
}

func (r *usageCleanupRepository) cancelTaskWithEnt(ctx context.Context, taskID int64, canceledBy int64) (bool, error) {
	client := clientFromContext(ctx, r.client)
	now := time.Now()
	affected, err := client.UsageCleanupTask.Update().
		Where(
			dbusagecleanuptask.IDEQ(taskID),
			dbusagecleanuptask.StatusIn(service.UsageCleanupStatusPending, service.UsageCleanupStatusRunning),
		).
		SetStatus(service.UsageCleanupStatusCanceled).
		SetCanceledBy(canceledBy).
		SetCanceledAt(now).
		SetFinishedAt(now).
		ClearErrorMessage().
		SetUpdatedAt(now).
		Save(ctx)
	if err != nil {
		return false, err
	}
	return affected > 0, nil
}

func (r *usageCleanupRepository) markTaskSucceededWithEnt(ctx context.Context, taskID int64, deletedRows int64) error {
	client := clientFromContext(ctx, r.client)
	now := time.Now()
	_, err := client.UsageCleanupTask.Update().
		Where(dbusagecleanuptask.IDEQ(taskID)).
		SetStatus(service.UsageCleanupStatusSucceeded).
		SetDeletedRows(deletedRows).
		SetFinishedAt(now).
		SetUpdatedAt(now).
		Save(ctx)
	return err
}

func (r *usageCleanupRepository) markTaskFailedWithEnt(ctx context.Context, taskID int64, deletedRows int64, errorMsg string) error {
	client := clientFromContext(ctx, r.client)
	now := time.Now()
	_, err := client.UsageCleanupTask.Update().
		Where(dbusagecleanuptask.IDEQ(taskID)).
		SetStatus(service.UsageCleanupStatusFailed).
		SetDeletedRows(deletedRows).
		SetErrorMessage(errorMsg).
		SetFinishedAt(now).
		SetUpdatedAt(now).
		Save(ctx)
	return err
}

func usageCleanupTaskFromEnt(row *dbent.UsageCleanupTask) (service.UsageCleanupTask, error) {
	task := service.UsageCleanupTask{
		ID:          row.ID,
		Status:      row.Status,
		CreatedBy:   row.CreatedBy,
		DeletedRows: row.DeletedRows,
		CreatedAt:   row.CreatedAt,
		UpdatedAt:   row.UpdatedAt,
	}
	if len(row.Filters) > 0 {
		if err := json.Unmarshal(row.Filters, &task.Filters); err != nil {
			return service.UsageCleanupTask{}, fmt.Errorf("parse cleanup filters: %w", err)
		}
	}
	if row.ErrorMessage != nil {
		task.ErrorMsg = row.ErrorMessage
	}
	if row.CanceledBy != nil {
		task.CanceledBy = row.CanceledBy
	}
	if row.CanceledAt != nil {
		task.CanceledAt = row.CanceledAt
	}
	if row.StartedAt != nil {
		task.StartedAt = row.StartedAt
	}
	if row.FinishedAt != nil {
		task.FinishedAt = row.FinishedAt
	}
	return task, nil
}
backend/internal/repository/usage_cleanup_repo_ent_test.go (new file, 251 lines)
@@ -0,0 +1,251 @@
package repository

import (
	"context"
	"database/sql"
	"encoding/json"
	"testing"
	"time"

	dbent "github.com/Wei-Shaw/sub2api/ent"
	"github.com/Wei-Shaw/sub2api/ent/enttest"
	dbusagecleanuptask "github.com/Wei-Shaw/sub2api/ent/usagecleanuptask"
	"github.com/Wei-Shaw/sub2api/internal/pkg/pagination"
	"github.com/Wei-Shaw/sub2api/internal/service"
	"github.com/stretchr/testify/require"

	"entgo.io/ent/dialect"
	entsql "entgo.io/ent/dialect/sql"
	_ "modernc.org/sqlite"
)

func newUsageCleanupEntRepo(t *testing.T) (*usageCleanupRepository, *dbent.Client) {
	t.Helper()
	db, err := sql.Open("sqlite", "file:usage_cleanup?mode=memory&cache=shared")
	require.NoError(t, err)
	t.Cleanup(func() { _ = db.Close() })
	_, err = db.Exec("PRAGMA foreign_keys = ON")
	require.NoError(t, err)

	drv := entsql.OpenDB(dialect.SQLite, db)
	client := enttest.NewClient(t, enttest.WithOptions(dbent.Driver(drv)))
	t.Cleanup(func() { _ = client.Close() })

	repo := &usageCleanupRepository{client: client, sql: db}
	return repo, client
}

func TestUsageCleanupRepositoryEntCreateAndList(t *testing.T) {
	repo, _ := newUsageCleanupEntRepo(t)

	start := time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC)
	end := start.Add(24 * time.Hour)
	task := &service.UsageCleanupTask{
		Status:    service.UsageCleanupStatusPending,
		Filters:   service.UsageCleanupFilters{StartTime: start, EndTime: end},
		CreatedBy: 9,
	}
	require.NoError(t, repo.CreateTask(context.Background(), task))
	require.NotZero(t, task.ID)

	task2 := &service.UsageCleanupTask{
		Status:    service.UsageCleanupStatusRunning,
		Filters:   service.UsageCleanupFilters{StartTime: start.Add(-24 * time.Hour), EndTime: end.Add(-24 * time.Hour)},
		CreatedBy: 10,
	}
	require.NoError(t, repo.CreateTask(context.Background(), task2))

	tasks, result, err := repo.ListTasks(context.Background(), pagination.PaginationParams{Page: 1, PageSize: 10})
	require.NoError(t, err)
	require.Len(t, tasks, 2)
	require.Equal(t, int64(2), result.Total)
	require.Greater(t, tasks[0].ID, tasks[1].ID)
	require.Equal(t, start, tasks[1].Filters.StartTime)
	require.Equal(t, end, tasks[1].Filters.EndTime)
}

func TestUsageCleanupRepositoryEntListEmpty(t *testing.T) {
	repo, _ := newUsageCleanupEntRepo(t)

	tasks, result, err := repo.ListTasks(context.Background(), pagination.PaginationParams{Page: 1, PageSize: 10})
	require.NoError(t, err)
	require.Empty(t, tasks)
	require.Equal(t, int64(0), result.Total)
}

func TestUsageCleanupRepositoryEntGetStatusAndProgress(t *testing.T) {
	repo, client := newUsageCleanupEntRepo(t)

	task := &service.UsageCleanupTask{
		Status:    service.UsageCleanupStatusPending,
		Filters:   service.UsageCleanupFilters{StartTime: time.Now().UTC(), EndTime: time.Now().UTC().Add(time.Hour)},
		CreatedBy: 3,
	}
	require.NoError(t, repo.CreateTask(context.Background(), task))

	status, err := repo.GetTaskStatus(context.Background(), task.ID)
	require.NoError(t, err)
	require.Equal(t, service.UsageCleanupStatusPending, status)

	_, err = repo.GetTaskStatus(context.Background(), task.ID+99)
	require.ErrorIs(t, err, sql.ErrNoRows)

	require.NoError(t, repo.UpdateTaskProgress(context.Background(), task.ID, 42))
	loaded, err := client.UsageCleanupTask.Get(context.Background(), task.ID)
	require.NoError(t, err)
	require.Equal(t, int64(42), loaded.DeletedRows)
}

func TestUsageCleanupRepositoryEntCancelAndFinish(t *testing.T) {
	repo, client := newUsageCleanupEntRepo(t)

	task := &service.UsageCleanupTask{
		Status:    service.UsageCleanupStatusPending,
		Filters:   service.UsageCleanupFilters{StartTime: time.Now().UTC(), EndTime: time.Now().UTC().Add(time.Hour)},
		CreatedBy: 5,
	}
	require.NoError(t, repo.CreateTask(context.Background(), task))

	ok, err := repo.CancelTask(context.Background(), task.ID, 7)
	require.NoError(t, err)
	require.True(t, ok)

	loaded, err := client.UsageCleanupTask.Get(context.Background(), task.ID)
	require.NoError(t, err)
	require.Equal(t, service.UsageCleanupStatusCanceled, loaded.Status)
	require.NotNil(t, loaded.CanceledBy)
	require.NotNil(t, loaded.CanceledAt)
	require.NotNil(t, loaded.FinishedAt)

	loaded.Status = service.UsageCleanupStatusSucceeded
	_, err = client.UsageCleanupTask.Update().Where(dbusagecleanuptask.IDEQ(task.ID)).SetStatus(loaded.Status).Save(context.Background())
	require.NoError(t, err)

	ok, err = repo.CancelTask(context.Background(), task.ID, 7)
	require.NoError(t, err)
	require.False(t, ok)
}

func TestUsageCleanupRepositoryEntCancelError(t *testing.T) {
	repo, client := newUsageCleanupEntRepo(t)

	task := &service.UsageCleanupTask{
		Status:    service.UsageCleanupStatusPending,
		Filters:   service.UsageCleanupFilters{StartTime: time.Now().UTC(), EndTime: time.Now().UTC().Add(time.Hour)},
		CreatedBy: 5,
	}
	require.NoError(t, repo.CreateTask(context.Background(), task))

	require.NoError(t, client.Close())
	_, err := repo.CancelTask(context.Background(), task.ID, 7)
	require.Error(t, err)
}

func TestUsageCleanupRepositoryEntMarkResults(t *testing.T) {
	repo, client := newUsageCleanupEntRepo(t)

	task := &service.UsageCleanupTask{
		Status:    service.UsageCleanupStatusRunning,
		Filters:   service.UsageCleanupFilters{StartTime: time.Now().UTC(), EndTime: time.Now().UTC().Add(time.Hour)},
		CreatedBy: 12,
	}
	require.NoError(t, repo.CreateTask(context.Background(), task))

	require.NoError(t, repo.MarkTaskSucceeded(context.Background(), task.ID, 6))
	loaded, err := client.UsageCleanupTask.Get(context.Background(), task.ID)
	require.NoError(t, err)
	require.Equal(t, service.UsageCleanupStatusSucceeded, loaded.Status)
	require.Equal(t, int64(6), loaded.DeletedRows)
	require.NotNil(t, loaded.FinishedAt)

	task2 := &service.UsageCleanupTask{
		Status:    service.UsageCleanupStatusRunning,
		Filters:   service.UsageCleanupFilters{StartTime: time.Now().UTC(), EndTime: time.Now().UTC().Add(time.Hour)},
		CreatedBy: 12,
	}
	require.NoError(t, repo.CreateTask(context.Background(), task2))

	require.NoError(t, repo.MarkTaskFailed(context.Background(), task2.ID, 4, "boom"))
	loaded2, err := client.UsageCleanupTask.Get(context.Background(), task2.ID)
	require.NoError(t, err)
	require.Equal(t, service.UsageCleanupStatusFailed, loaded2.Status)
	require.Equal(t, "boom", *loaded2.ErrorMessage)
}

func TestUsageCleanupRepositoryEntInvalidStatus(t *testing.T) {
	repo, _ := newUsageCleanupEntRepo(t)

	task := &service.UsageCleanupTask{
		Status:    "invalid",
		Filters:   service.UsageCleanupFilters{StartTime: time.Now().UTC(), EndTime: time.Now().UTC().Add(time.Hour)},
		CreatedBy: 1,
	}
	require.Error(t, repo.CreateTask(context.Background(), task))
}

func TestUsageCleanupRepositoryEntListInvalidFilters(t *testing.T) {
	repo, client := newUsageCleanupEntRepo(t)

	now := time.Now().UTC()
	driver, ok := client.Driver().(*entsql.Driver)
	require.True(t, ok)
	_, err := driver.DB().ExecContext(
		context.Background(),
		`INSERT INTO usage_cleanup_tasks (status, filters, created_by, deleted_rows, created_at, updated_at)
		 VALUES (?, ?, ?, ?, ?, ?)`,
		service.UsageCleanupStatusPending,
		[]byte("invalid-json"),
		int64(1),
		int64(0),
		now,
		now,
	)
	require.NoError(t, err)

	_, _, err = repo.ListTasks(context.Background(), pagination.PaginationParams{Page: 1, PageSize: 10})
	require.Error(t, err)
}

func TestUsageCleanupTaskFromEntFull(t *testing.T) {
	start := time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC)
	end := start.Add(24 * time.Hour)
	errMsg := "failed"
	canceledBy := int64(2)
	canceledAt := start.Add(time.Minute)
	startedAt := start.Add(2 * time.Minute)
	finishedAt := start.Add(3 * time.Minute)
	filters := service.UsageCleanupFilters{StartTime: start, EndTime: end}
	filtersJSON, err := json.Marshal(filters)
	require.NoError(t, err)

	task, err := usageCleanupTaskFromEnt(&dbent.UsageCleanupTask{
		ID:           10,
		Status:       service.UsageCleanupStatusFailed,
		Filters:      filtersJSON,
		CreatedBy:    11,
		DeletedRows:  7,
		ErrorMessage: &errMsg,
		CanceledBy:   &canceledBy,
		CanceledAt:   &canceledAt,
		StartedAt:    &startedAt,
		FinishedAt:   &finishedAt,
		CreatedAt:    start,
		UpdatedAt:    end,
	})
	require.NoError(t, err)
	require.Equal(t, int64(10), task.ID)
	require.Equal(t, service.UsageCleanupStatusFailed, task.Status)
	require.NotNil(t, task.ErrorMsg)
	require.NotNil(t, task.CanceledBy)
	require.NotNil(t, task.CanceledAt)
	require.NotNil(t, task.StartedAt)
	require.NotNil(t, task.FinishedAt)
}

func TestUsageCleanupTaskFromEntInvalidFilters(t *testing.T) {
	task, err := usageCleanupTaskFromEnt(&dbent.UsageCleanupTask{
		Filters: json.RawMessage("invalid-json"),
	})
	require.Error(t, err)
	require.Empty(t, task)
}
backend/internal/repository/usage_cleanup_repo_test.go (new file, 482 lines)
@@ -0,0 +1,482 @@
|
||||
package repository
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/DATA-DOG/go-sqlmock"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/pagination"
|
||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func newSQLMock(t *testing.T) (*sql.DB, sqlmock.Sqlmock) {
|
||||
t.Helper()
|
||||
db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherRegexp))
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { _ = db.Close() })
|
||||
return db, mock
|
||||
}
|
||||
|
||||
func TestNewUsageCleanupRepository(t *testing.T) {
|
||||
db, _ := newSQLMock(t)
|
||||
repo := NewUsageCleanupRepository(nil, db)
|
||||
require.NotNil(t, repo)
|
||||
}
|
||||
|
||||
func TestUsageCleanupRepositoryCreateTask(t *testing.T) {
|
||||
db, mock := newSQLMock(t)
|
||||
repo := &usageCleanupRepository{sql: db}
|
||||
|
||||
start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)
|
||||
end := start.Add(24 * time.Hour)
|
||||
task := &service.UsageCleanupTask{
|
||||
Status: service.UsageCleanupStatusPending,
|
||||
Filters: service.UsageCleanupFilters{StartTime: start, EndTime: end},
|
||||
CreatedBy: 12,
|
||||
}
|
||||
now := time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC)
|
||||
|
||||
mock.ExpectQuery("INSERT INTO usage_cleanup_tasks").
|
||||
WithArgs(task.Status, sqlmock.AnyArg(), task.CreatedBy, task.DeletedRows).
|
||||
WillReturnRows(sqlmock.NewRows([]string{"id", "created_at", "updated_at"}).AddRow(int64(1), now, now))
|
||||
|
||||
err := repo.CreateTask(context.Background(), task)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(1), task.ID)
|
||||
require.Equal(t, now, task.CreatedAt)
|
||||
require.Equal(t, now, task.UpdatedAt)
|
||||
require.NoError(t, mock.ExpectationsWereMet())
|
||||
}
|
||||
|
||||
func TestUsageCleanupRepositoryCreateTaskNil(t *testing.T) {
|
||||
db, mock := newSQLMock(t)
|
||||
repo := &usageCleanupRepository{sql: db}
|
||||
|
||||
err := repo.CreateTask(context.Background(), nil)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, mock.ExpectationsWereMet())
|
||||
}
|
||||
|
||||
func TestUsageCleanupRepositoryCreateTaskQueryError(t *testing.T) {
|
||||
db, mock := newSQLMock(t)
|
||||
repo := &usageCleanupRepository{sql: db}
|
||||
|
||||
task := &service.UsageCleanupTask{
|
||||
Status: service.UsageCleanupStatusPending,
|
||||
Filters: service.UsageCleanupFilters{StartTime: time.Now(), EndTime: time.Now().Add(time.Hour)},
|
||||
CreatedBy: 1,
|
||||
}
|
||||
|
||||
mock.ExpectQuery("INSERT INTO usage_cleanup_tasks").
|
||||
WithArgs(task.Status, sqlmock.AnyArg(), task.CreatedBy, task.DeletedRows).
|
||||
WillReturnError(sql.ErrConnDone)
|
||||
|
||||
err := repo.CreateTask(context.Background(), task)
|
||||
require.Error(t, err)
|
||||
require.NoError(t, mock.ExpectationsWereMet())
|
||||
}
|
||||
|
||||
func TestUsageCleanupRepositoryListTasksEmpty(t *testing.T) {
|
||||
db, mock := newSQLMock(t)
|
||||
repo := &usageCleanupRepository{sql: db}
|
||||
|
||||
mock.ExpectQuery("SELECT COUNT\\(\\*\\) FROM usage_cleanup_tasks").
|
||||
WillReturnRows(sqlmock.NewRows([]string{"count"}).AddRow(int64(0)))
|
||||
|
||||
tasks, result, err := repo.ListTasks(context.Background(), pagination.PaginationParams{Page: 1, PageSize: 20})
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, tasks)
|
||||
require.Equal(t, int64(0), result.Total)
|
||||
require.NoError(t, mock.ExpectationsWereMet())
|
||||
}
|
||||
|
||||
func TestUsageCleanupRepositoryListTasks(t *testing.T) {
|
||||
db, mock := newSQLMock(t)
|
||||
repo := &usageCleanupRepository{sql: db}
|
||||
|
||||
start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)
|
||||
end := start.Add(2 * time.Hour)
|
||||
filters := service.UsageCleanupFilters{StartTime: start, EndTime: end}
|
||||
filtersJSON, err := json.Marshal(filters)
|
||||
require.NoError(t, err)
|
||||
|
||||
createdAt := time.Date(2024, 1, 2, 12, 0, 0, 0, time.UTC)
|
||||
updatedAt := createdAt.Add(time.Minute)
|
||||
rows := sqlmock.NewRows([]string{
|
||||
"id", "status", "filters", "created_by", "deleted_rows", "error_message",
|
||||
"canceled_by", "canceled_at",
|
||||
"started_at", "finished_at", "created_at", "updated_at",
|
||||
}).AddRow(
|
||||
int64(1),
|
||||
service.UsageCleanupStatusSucceeded,
|
||||
filtersJSON,
|
||||
int64(2),
|
||||
int64(9),
|
||||
"error",
|
||||
nil,
|
||||
nil,
|
||||
start,
|
||||
end,
|
||||
createdAt,
|
||||
updatedAt,
|
||||
)
|
||||
|
||||
mock.ExpectQuery("SELECT COUNT\\(\\*\\) FROM usage_cleanup_tasks").
|
||||
WillReturnRows(sqlmock.NewRows([]string{"count"}).AddRow(int64(1)))
|
||||
mock.ExpectQuery("SELECT id, status, filters, created_by, deleted_rows, error_message").
|
||||
WithArgs(20, 0).
|
||||
WillReturnRows(rows)
|
||||
|
||||
tasks, result, err := repo.ListTasks(context.Background(), pagination.PaginationParams{Page: 1, PageSize: 20})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, tasks, 1)
|
||||
require.Equal(t, int64(1), tasks[0].ID)
|
||||
require.Equal(t, service.UsageCleanupStatusSucceeded, tasks[0].Status)
|
||||
require.Equal(t, int64(2), tasks[0].CreatedBy)
|
||||
require.Equal(t, int64(9), tasks[0].DeletedRows)
|
||||
require.NotNil(t, tasks[0].ErrorMsg)
|
||||
require.Equal(t, "error", *tasks[0].ErrorMsg)
|
||||
require.NotNil(t, tasks[0].StartedAt)
|
||||
require.NotNil(t, tasks[0].FinishedAt)
|
||||
require.Equal(t, int64(1), result.Total)
|
||||
require.NoError(t, mock.ExpectationsWereMet())
|
||||
}
|
||||
|
||||
func TestUsageCleanupRepositoryListTasksQueryError(t *testing.T) {
|
||||
db, mock := newSQLMock(t)
|
||||
repo := &usageCleanupRepository{sql: db}
|
||||
|
||||
mock.ExpectQuery("SELECT COUNT\\(\\*\\) FROM usage_cleanup_tasks").
|
||||
WillReturnRows(sqlmock.NewRows([]string{"count"}).AddRow(int64(2)))
|
||||
mock.ExpectQuery("SELECT id, status, filters, created_by, deleted_rows, error_message").
|
||||
WithArgs(20, 0).
|
||||
WillReturnError(sql.ErrConnDone)
|
||||
|
||||
_, _, err := repo.ListTasks(context.Background(), pagination.PaginationParams{Page: 1, PageSize: 20})
|
||||
require.Error(t, err)
|
||||
require.NoError(t, mock.ExpectationsWereMet())
|
||||
}
|
||||
|
||||
func TestUsageCleanupRepositoryListTasksInvalidFilters(t *testing.T) {
|
||||
db, mock := newSQLMock(t)
|
||||
repo := &usageCleanupRepository{sql: db}
|
||||
|
||||
rows := sqlmock.NewRows([]string{
|
||||
"id", "status", "filters", "created_by", "deleted_rows", "error_message",
|
||||
"canceled_by", "canceled_at",
|
||||
"started_at", "finished_at", "created_at", "updated_at",
|
||||
}).AddRow(
|
||||
int64(1),
|
||||
service.UsageCleanupStatusSucceeded,
|
||||
[]byte("not-json"),
|
||||
int64(2),
|
||||
int64(9),
|
||||
nil,
|
||||
nil,
|
||||
nil,
|
||||
nil,
|
||||
nil,
|
||||
time.Now().UTC(),
|
||||
time.Now().UTC(),
|
||||
)
|
||||
|
||||
mock.ExpectQuery("SELECT COUNT\\(\\*\\) FROM usage_cleanup_tasks").
|
||||
WillReturnRows(sqlmock.NewRows([]string{"count"}).AddRow(int64(1)))
|
||||
mock.ExpectQuery("SELECT id, status, filters, created_by, deleted_rows, error_message").
|
||||
WithArgs(20, 0).
|
||||
WillReturnRows(rows)
|
||||
|
||||
_, _, err := repo.ListTasks(context.Background(), pagination.PaginationParams{Page: 1, PageSize: 20})
|
||||
require.Error(t, err)
|
||||
require.NoError(t, mock.ExpectationsWereMet())
|
||||
}
|
||||
|
||||
func TestUsageCleanupRepositoryClaimNextPendingTaskNone(t *testing.T) {
	db, mock := newSQLMock(t)
	repo := &usageCleanupRepository{sql: db}

	mock.ExpectQuery("UPDATE usage_cleanup_tasks").
		WithArgs(service.UsageCleanupStatusPending, service.UsageCleanupStatusRunning, int64(1800), service.UsageCleanupStatusRunning).
		WillReturnRows(sqlmock.NewRows([]string{
			"id", "status", "filters", "created_by", "deleted_rows", "error_message",
			"started_at", "finished_at", "created_at", "updated_at",
		}))

	task, err := repo.ClaimNextPendingTask(context.Background(), 1800)
	require.NoError(t, err)
	require.Nil(t, task)
	require.NoError(t, mock.ExpectationsWereMet())
}

func TestUsageCleanupRepositoryClaimNextPendingTask(t *testing.T) {
	db, mock := newSQLMock(t)
	repo := &usageCleanupRepository{sql: db}

	start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)
	end := start.Add(24 * time.Hour)
	filters := service.UsageCleanupFilters{StartTime: start, EndTime: end}
	filtersJSON, err := json.Marshal(filters)
	require.NoError(t, err)

	rows := sqlmock.NewRows([]string{
		"id", "status", "filters", "created_by", "deleted_rows", "error_message",
		"started_at", "finished_at", "created_at", "updated_at",
	}).AddRow(
		int64(4),
		service.UsageCleanupStatusRunning,
		filtersJSON,
		int64(7),
		int64(0),
		nil,
		start,
		nil,
		start,
		start,
	)

	mock.ExpectQuery("UPDATE usage_cleanup_tasks").
		WithArgs(service.UsageCleanupStatusPending, service.UsageCleanupStatusRunning, int64(1800), service.UsageCleanupStatusRunning).
		WillReturnRows(rows)

	task, err := repo.ClaimNextPendingTask(context.Background(), 1800)
	require.NoError(t, err)
	require.NotNil(t, task)
	require.Equal(t, int64(4), task.ID)
	require.Equal(t, service.UsageCleanupStatusRunning, task.Status)
	require.Equal(t, int64(7), task.CreatedBy)
	require.NotNil(t, task.StartedAt)
	require.Nil(t, task.ErrorMsg)
	require.NoError(t, mock.ExpectationsWereMet())
}

func TestUsageCleanupRepositoryClaimNextPendingTaskError(t *testing.T) {
	db, mock := newSQLMock(t)
	repo := &usageCleanupRepository{sql: db}

	mock.ExpectQuery("UPDATE usage_cleanup_tasks").
		WithArgs(service.UsageCleanupStatusPending, service.UsageCleanupStatusRunning, int64(1800), service.UsageCleanupStatusRunning).
		WillReturnError(sql.ErrConnDone)

	_, err := repo.ClaimNextPendingTask(context.Background(), 1800)
	require.Error(t, err)
	require.NoError(t, mock.ExpectationsWereMet())
}

func TestUsageCleanupRepositoryClaimNextPendingTaskInvalidFilters(t *testing.T) {
	db, mock := newSQLMock(t)
	repo := &usageCleanupRepository{sql: db}

	rows := sqlmock.NewRows([]string{
		"id", "status", "filters", "created_by", "deleted_rows", "error_message",
		"started_at", "finished_at", "created_at", "updated_at",
	}).AddRow(
		int64(4),
		service.UsageCleanupStatusRunning,
		[]byte("invalid"),
		int64(7),
		int64(0),
		nil,
		nil,
		nil,
		time.Now().UTC(),
		time.Now().UTC(),
	)

	mock.ExpectQuery("UPDATE usage_cleanup_tasks").
		WithArgs(service.UsageCleanupStatusPending, service.UsageCleanupStatusRunning, int64(1800), service.UsageCleanupStatusRunning).
		WillReturnRows(rows)

	_, err := repo.ClaimNextPendingTask(context.Background(), 1800)
	require.Error(t, err)
	require.NoError(t, mock.ExpectationsWereMet())
}

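// The claim tests above only pin the argument order (pending status, running status,
// timeout seconds, running status again) and the RETURNING column list; the production
// query lives in usageCleanupRepository. A minimal sketch of a claim statement that
// would satisfy those sqlmock expectations; the exact SQL, including the stale-running
// reclaim condition and SKIP LOCKED, is an assumption rather than the actual query:
const claimNextPendingTaskSketchSQL = `
UPDATE usage_cleanup_tasks
SET status = $2, started_at = NOW(), updated_at = NOW()
WHERE id = (
	SELECT id FROM usage_cleanup_tasks
	WHERE status = $1
	   OR (status = $4 AND updated_at < NOW() - make_interval(secs => $3))
	ORDER BY id
	LIMIT 1
	FOR UPDATE SKIP LOCKED
)
RETURNING id, status, filters, created_by, deleted_rows, error_message,
	started_at, finished_at, created_at, updated_at`
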
func TestUsageCleanupRepositoryMarkTaskSucceeded(t *testing.T) {
	db, mock := newSQLMock(t)
	repo := &usageCleanupRepository{sql: db}

	mock.ExpectExec("UPDATE usage_cleanup_tasks").
		WithArgs(service.UsageCleanupStatusSucceeded, int64(12), int64(9)).
		WillReturnResult(sqlmock.NewResult(0, 1))

	err := repo.MarkTaskSucceeded(context.Background(), 9, 12)
	require.NoError(t, err)
	require.NoError(t, mock.ExpectationsWereMet())
}

func TestUsageCleanupRepositoryMarkTaskFailed(t *testing.T) {
	db, mock := newSQLMock(t)
	repo := &usageCleanupRepository{sql: db}

	mock.ExpectExec("UPDATE usage_cleanup_tasks").
		WithArgs(service.UsageCleanupStatusFailed, int64(4), "boom", int64(2)).
		WillReturnResult(sqlmock.NewResult(0, 1))

	err := repo.MarkTaskFailed(context.Background(), 2, 4, "boom")
	require.NoError(t, err)
	require.NoError(t, mock.ExpectationsWereMet())
}

func TestUsageCleanupRepositoryGetTaskStatus(t *testing.T) {
	db, mock := newSQLMock(t)
	repo := &usageCleanupRepository{sql: db}

	mock.ExpectQuery("SELECT status FROM usage_cleanup_tasks").
		WithArgs(int64(9)).
		WillReturnRows(sqlmock.NewRows([]string{"status"}).AddRow(service.UsageCleanupStatusPending))

	status, err := repo.GetTaskStatus(context.Background(), 9)
	require.NoError(t, err)
	require.Equal(t, service.UsageCleanupStatusPending, status)
	require.NoError(t, mock.ExpectationsWereMet())
}

func TestUsageCleanupRepositoryGetTaskStatusQueryError(t *testing.T) {
	db, mock := newSQLMock(t)
	repo := &usageCleanupRepository{sql: db}

	mock.ExpectQuery("SELECT status FROM usage_cleanup_tasks").
		WithArgs(int64(9)).
		WillReturnError(sql.ErrConnDone)

	_, err := repo.GetTaskStatus(context.Background(), 9)
	require.Error(t, err)
	require.NoError(t, mock.ExpectationsWereMet())
}

func TestUsageCleanupRepositoryUpdateTaskProgress(t *testing.T) {
	db, mock := newSQLMock(t)
	repo := &usageCleanupRepository{sql: db}

	mock.ExpectExec("UPDATE usage_cleanup_tasks").
		WithArgs(int64(123), int64(8)).
		WillReturnResult(sqlmock.NewResult(0, 1))

	err := repo.UpdateTaskProgress(context.Background(), 8, 123)
	require.NoError(t, err)
	require.NoError(t, mock.ExpectationsWereMet())
}

func TestUsageCleanupRepositoryCancelTask(t *testing.T) {
	db, mock := newSQLMock(t)
	repo := &usageCleanupRepository{sql: db}

	mock.ExpectQuery("UPDATE usage_cleanup_tasks").
		WithArgs(service.UsageCleanupStatusCanceled, int64(6), int64(9), service.UsageCleanupStatusPending, service.UsageCleanupStatusRunning).
		WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(int64(6)))

	ok, err := repo.CancelTask(context.Background(), 6, 9)
	require.NoError(t, err)
	require.True(t, ok)
	require.NoError(t, mock.ExpectationsWereMet())
}

func TestUsageCleanupRepositoryCancelTaskNoRows(t *testing.T) {
	db, mock := newSQLMock(t)
	repo := &usageCleanupRepository{sql: db}

	mock.ExpectQuery("UPDATE usage_cleanup_tasks").
		WithArgs(service.UsageCleanupStatusCanceled, int64(6), int64(9), service.UsageCleanupStatusPending, service.UsageCleanupStatusRunning).
		WillReturnRows(sqlmock.NewRows([]string{"id"}))

	ok, err := repo.CancelTask(context.Background(), 6, 9)
	require.NoError(t, err)
	require.False(t, ok)
	require.NoError(t, mock.ExpectationsWereMet())
}

func TestUsageCleanupRepositoryDeleteUsageLogsBatchMissingRange(t *testing.T) {
	db, _ := newSQLMock(t)
	repo := &usageCleanupRepository{sql: db}

	_, err := repo.DeleteUsageLogsBatch(context.Background(), service.UsageCleanupFilters{}, 10)
	require.Error(t, err)
}

func TestUsageCleanupRepositoryDeleteUsageLogsBatch(t *testing.T) {
	db, mock := newSQLMock(t)
	repo := &usageCleanupRepository{sql: db}

	start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)
	end := start.Add(24 * time.Hour)
	userID := int64(3)
	model := " gpt-4 "
	filters := service.UsageCleanupFilters{
		StartTime: start,
		EndTime:   end,
		UserID:    &userID,
		Model:     &model,
	}

	mock.ExpectQuery("DELETE FROM usage_logs").
		WithArgs(start, end, userID, "gpt-4", 2).
		WillReturnRows(sqlmock.NewRows([]string{"id"}).AddRow(int64(1)).AddRow(int64(2)))

	deleted, err := repo.DeleteUsageLogsBatch(context.Background(), filters, 2)
	require.NoError(t, err)
	require.Equal(t, int64(2), deleted)
	require.NoError(t, mock.ExpectationsWereMet())
}

func TestUsageCleanupRepositoryDeleteUsageLogsBatchQueryError(t *testing.T) {
	db, mock := newSQLMock(t)
	repo := &usageCleanupRepository{sql: db}

	start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)
	end := start.Add(24 * time.Hour)
	filters := service.UsageCleanupFilters{StartTime: start, EndTime: end}

	mock.ExpectQuery("DELETE FROM usage_logs").
		WithArgs(start, end, 5).
		WillReturnError(sql.ErrConnDone)

	_, err := repo.DeleteUsageLogsBatch(context.Background(), filters, 5)
	require.Error(t, err)
	require.NoError(t, mock.ExpectationsWereMet())
}

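// The batch-delete tests above fix the argument order (range and filter values first,
// batch size last) and derive the deleted count from the number of returned ids.
// A minimal sketch of a statement shaped like that; the real query is built from
// buildUsageCleanupWhere, and the id-subquery form used here is only an assumption:
const deleteUsageLogsBatchSketchSQL = `
DELETE FROM usage_logs
WHERE id IN (
	SELECT id FROM usage_logs
	WHERE created_at >= $1 AND created_at <= $2 AND user_id = $3 AND model = $4
	ORDER BY id
	LIMIT $5
)
RETURNING id`
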
func TestBuildUsageCleanupWhere(t *testing.T) {
	start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)
	end := start.Add(24 * time.Hour)
	userID := int64(1)
	apiKeyID := int64(2)
	accountID := int64(3)
	groupID := int64(4)
	model := " gpt-4 "
	stream := true
	billingType := int8(2)

	where, args := buildUsageCleanupWhere(service.UsageCleanupFilters{
		StartTime:   start,
		EndTime:     end,
		UserID:      &userID,
		APIKeyID:    &apiKeyID,
		AccountID:   &accountID,
		GroupID:     &groupID,
		Model:       &model,
		Stream:      &stream,
		BillingType: &billingType,
	})

	require.Equal(t, "created_at >= $1 AND created_at <= $2 AND user_id = $3 AND api_key_id = $4 AND account_id = $5 AND group_id = $6 AND model = $7 AND stream = $8 AND billing_type = $9", where)
	require.Equal(t, []any{start, end, userID, apiKeyID, accountID, groupID, "gpt-4", stream, billingType}, args)
}

func TestBuildUsageCleanupWhereModelEmpty(t *testing.T) {
	start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)
	end := start.Add(24 * time.Hour)
	model := " "

	where, args := buildUsageCleanupWhere(service.UsageCleanupFilters{
		StartTime: start,
		EndTime:   end,
		Model:     &model,
	})

	require.Equal(t, "created_at >= $1 AND created_at <= $2", where)
	require.Equal(t, []any{start, end}, args)
}

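// The two where-builder tests above pin the observable behaviour of
// buildUsageCleanupWhere: clauses are appended in a fixed order, placeholders are
// numbered sequentially, and a Model that trims to the empty string is dropped.
// A minimal sketch consistent with those assertions; everything beyond what the
// tests assert (the helper name and the assumed "fmt"/"strings" imports) is an
// assumption, not the repository's actual implementation:
func buildUsageCleanupWhereSketch(f service.UsageCleanupFilters) (string, []any) {
	clauses := []string{}
	args := []any{}
	// add appends the value and emits "<expr> $<n>" with the next placeholder index.
	add := func(expr string, v any) {
		args = append(args, v)
		clauses = append(clauses, fmt.Sprintf("%s $%d", expr, len(args)))
	}
	add("created_at >=", f.StartTime)
	add("created_at <=", f.EndTime)
	if f.UserID != nil {
		add("user_id =", *f.UserID)
	}
	if f.APIKeyID != nil {
		add("api_key_id =", *f.APIKeyID)
	}
	if f.AccountID != nil {
		add("account_id =", *f.AccountID)
	}
	if f.GroupID != nil {
		add("group_id =", *f.GroupID)
	}
	if f.Model != nil {
		if m := strings.TrimSpace(*f.Model); m != "" {
			add("model =", m)
		}
	}
	if f.Stream != nil {
		add("stream =", *f.Stream)
	}
	if f.BillingType != nil {
		add("billing_type =", *f.BillingType)
	}
	return strings.Join(clauses, " AND "), args
}
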
@@ -1411,7 +1411,7 @@ func (r *usageLogRepository) GetBatchAPIKeyUsageStats(ctx context.Context, apiKe
}

// GetUsageTrendWithFilters returns usage trend data with optional filters
func (r *usageLogRepository) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool) (results []TrendDataPoint, err error) {
func (r *usageLogRepository) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool, billingType *int8) (results []TrendDataPoint, err error) {
	dateFormat := "YYYY-MM-DD"
	if granularity == "hour" {
		dateFormat = "YYYY-MM-DD HH24:00"
@@ -1456,6 +1456,10 @@ func (r *usageLogRepository) GetUsageTrendWithFilters(ctx context.Context, start
		query += fmt.Sprintf(" AND stream = $%d", len(args)+1)
		args = append(args, *stream)
	}
	if billingType != nil {
		query += fmt.Sprintf(" AND billing_type = $%d", len(args)+1)
		args = append(args, int16(*billingType))
	}
	query += " GROUP BY date ORDER BY date ASC"

	rows, err := r.sql.QueryContext(ctx, query, args...)
@@ -1479,7 +1483,7 @@ func (r *usageLogRepository) GetUsageTrendWithFilters(ctx context.Context, start
}

// GetModelStatsWithFilters returns model statistics with optional filters
func (r *usageLogRepository) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool) (results []ModelStat, err error) {
func (r *usageLogRepository) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool, billingType *int8) (results []ModelStat, err error) {
	actualCostExpr := "COALESCE(SUM(actual_cost), 0) as actual_cost"
	// When aggregating by account_id only, actual cost uses the account multiplier (total_cost * account_rate_multiplier).
	if accountID > 0 && userID == 0 && apiKeyID == 0 {
@@ -1520,6 +1524,10 @@ func (r *usageLogRepository) GetModelStatsWithFilters(ctx context.Context, start
		query += fmt.Sprintf(" AND stream = $%d", len(args)+1)
		args = append(args, *stream)
	}
	if billingType != nil {
		query += fmt.Sprintf(" AND billing_type = $%d", len(args)+1)
		args = append(args, int16(*billingType))
	}
	query += " GROUP BY model ORDER BY total_tokens DESC"

	rows, err := r.sql.QueryContext(ctx, query, args...)
@@ -1825,7 +1833,7 @@ func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID
		}
	}

	models, err := r.GetModelStatsWithFilters(ctx, startTime, endTime, 0, 0, accountID, 0, nil)
	models, err := r.GetModelStatsWithFilters(ctx, startTime, endTime, 0, 0, accountID, 0, nil, nil)
	if err != nil {
		models = []ModelStat{}
	}

@@ -944,17 +944,17 @@ func (s *UsageLogRepoSuite) TestGetUsageTrendWithFilters() {
	endTime := base.Add(48 * time.Hour)

	// Test with user filter
	trend, err := s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", user.ID, 0, 0, 0, "", nil)
	trend, err := s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", user.ID, 0, 0, 0, "", nil, nil)
	s.Require().NoError(err, "GetUsageTrendWithFilters user filter")
	s.Require().Len(trend, 2)

	// Test with apiKey filter
	trend, err = s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", 0, apiKey.ID, 0, 0, "", nil)
	trend, err = s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", 0, apiKey.ID, 0, 0, "", nil, nil)
	s.Require().NoError(err, "GetUsageTrendWithFilters apiKey filter")
	s.Require().Len(trend, 2)

	// Test with both filters
	trend, err = s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", user.ID, apiKey.ID, 0, 0, "", nil)
	trend, err = s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", user.ID, apiKey.ID, 0, 0, "", nil, nil)
	s.Require().NoError(err, "GetUsageTrendWithFilters both filters")
	s.Require().Len(trend, 2)
}
@@ -971,7 +971,7 @@ func (s *UsageLogRepoSuite) TestGetUsageTrendWithFilters_HourlyGranularity() {
	startTime := base.Add(-1 * time.Hour)
	endTime := base.Add(3 * time.Hour)

	trend, err := s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "hour", user.ID, 0, 0, 0, "", nil)
	trend, err := s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "hour", user.ID, 0, 0, 0, "", nil, nil)
	s.Require().NoError(err, "GetUsageTrendWithFilters hourly")
	s.Require().Len(trend, 2)
}
@@ -1017,17 +1017,17 @@ func (s *UsageLogRepoSuite) TestGetModelStatsWithFilters() {
	endTime := base.Add(2 * time.Hour)

	// Test with user filter
	stats, err := s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, user.ID, 0, 0, 0, nil)
	stats, err := s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, user.ID, 0, 0, 0, nil, nil)
	s.Require().NoError(err, "GetModelStatsWithFilters user filter")
	s.Require().Len(stats, 2)

	// Test with apiKey filter
	stats, err = s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, 0, apiKey.ID, 0, 0, nil)
	stats, err = s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, 0, apiKey.ID, 0, 0, nil, nil)
	s.Require().NoError(err, "GetModelStatsWithFilters apiKey filter")
	s.Require().Len(stats, 2)

	// Test with account filter
	stats, err = s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, 0, 0, account.ID, 0, nil)
	stats, err = s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, 0, 0, account.ID, 0, nil, nil)
	s.Require().NoError(err, "GetModelStatsWithFilters account filter")
	s.Require().Len(stats, 2)
}

@@ -57,6 +57,7 @@ var ProviderSet = wire.NewSet(
	NewRedeemCodeRepository,
	NewPromoCodeRepository,
	NewUsageLogRepository,
	NewUsageCleanupRepository,
	NewDashboardAggregationRepository,
	NewSettingRepository,
	NewOpsRepository,