fix: 修复 golangci-lint depguard 和 gofmt 错误

将 redis leader lock 逻辑从 service 层抽取为 LeaderLocker 接口,
实现移至 repository 层,消除 service 层对 redis 的直接依赖。

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
guoyongchang
2026-03-05 16:28:48 +08:00
parent 8adf80d98b
commit 9a8dacc514
6 changed files with 115 additions and 70 deletions

View File

@@ -229,7 +229,8 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
tokenRefreshService := service.ProvideTokenRefreshService(accountRepository, soraAccountRepository, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, compositeTokenCacheInvalidator, schedulerCache, configConfig, tempUnschedCache)
accountExpiryService := service.ProvideAccountExpiryService(accountRepository)
subscriptionExpiryService := service.ProvideSubscriptionExpiryService(userSubscriptionRepository)
scheduledTestRunnerService := service.ProvideScheduledTestRunnerService(scheduledTestPlanRepository, scheduledTestService, accountTestService, db, redisClient, configConfig)
leaderLocker := repository.NewLeaderLocker(db, redisClient)
scheduledTestRunnerService := service.ProvideScheduledTestRunnerService(scheduledTestPlanRepository, scheduledTestService, accountTestService, leaderLocker, configConfig)
v := provideCleanup(client, redisClient, opsMetricsCollector, opsAggregationService, opsAlertEvaluatorService, opsCleanupService, opsScheduledReportService, opsSystemLogSink, soraMediaCleanupService, schedulerSnapshotService, tokenRefreshService, accountExpiryService, subscriptionExpiryService, usageCleanupService, idempotencyCleanupService, pricingService, emailQueueService, billingCacheService, usageRecordWorkerPool, subscriptionService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, openAIGatewayService, scheduledTestRunnerService)
application := &Application{
Server: httpServer,

View File

@@ -3,9 +3,14 @@ package repository
import (
"context"
"database/sql"
"hash/fnv"
"sync"
"time"
"github.com/Wei-Shaw/sub2api/internal/pkg/logger"
"github.com/Wei-Shaw/sub2api/internal/service"
"github.com/google/uuid"
"github.com/redis/go-redis/v9"
)
// --- Plan Repository ---
@@ -181,3 +186,83 @@ func scanPlans(rows *sql.Rows) ([]*service.ScheduledTestPlan, error) {
}
return plans, rows.Err()
}
// --- LeaderLocker ---

// redisReleaseScript releases a Redis lock only when it is still owned by
// the caller (the stored value matches the caller's instanceID). Running the
// GET+DEL pair as a single Lua script makes it atomic, so an instance never
// deletes a lock that another instance re-acquired after this one's TTL
// expired.
var redisReleaseScript = redis.NewScript(`
if redis.call("GET", KEYS[1]) == ARGV[1] then
	return redis.call("DEL", KEYS[1])
end
return 0
`)
// leaderLocker implements service.LeaderLocker. It prefers a Redis
// SET NX lock and falls back to a PostgreSQL advisory lock when Redis
// is unavailable or not configured.
type leaderLocker struct {
	db              *sql.DB       // advisory-lock fallback; may be nil
	redisClient     *redis.Client // preferred lock backend; may be nil
	instanceID      string        // unique per-process token stored as the lock value
	warnNoRedisOnce sync.Once     // emits the Redis-unavailable warning only once
}
// NewLeaderLocker builds a LeaderLocker that attempts Redis first and
// falls back to a PostgreSQL advisory lock. Either dependency may be nil;
// the locker degrades gracefully at acquire time.
func NewLeaderLocker(db *sql.DB, redisClient *redis.Client) service.LeaderLocker {
	locker := &leaderLocker{
		db:          db,
		redisClient: redisClient,
	}
	// A unique per-process token lets the release script verify ownership.
	locker.instanceID = uuid.NewString()
	return locker
}
// TryAcquire attempts to take the named leader lock for at most ttl.
// It prefers a Redis SET NX lock; when Redis is not configured or the call
// fails, it falls back to a PostgreSQL advisory lock. On success it returns
// a release function and true; otherwise nil and false.
func (l *leaderLocker) TryAcquire(ctx context.Context, key string, ttl time.Duration) (func(), bool) {
	if l.redisClient != nil {
		ok, err := l.redisClient.SetNX(ctx, key, l.instanceID, ttl).Result()
		if err == nil {
			if !ok {
				// Lock is currently held by another instance.
				return nil, false
			}
			return func() {
				// Use a detached context: release is typically deferred, so the
				// caller's ctx may already be cancelled or expired by the time it
				// runs, which would make the DEL fail silently and leave the lock
				// held until the TTL elapses. Mirrors the 2s budget used by the
				// advisory-lock release below.
				releaseCtx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
				defer cancel()
				// Compare-and-delete via Lua so we never remove a lock that
				// another instance re-acquired after our TTL expired.
				_, _ = redisReleaseScript.Run(releaseCtx, l.redisClient, []string{key}, l.instanceID).Result()
			}, true
		}
		l.warnNoRedisOnce.Do(func() {
			logger.LegacyPrintf("repository.leader_locker", "[LeaderLocker] Redis SetNX failed; falling back to DB advisory lock: %v", err)
		})
	} else {
		l.warnNoRedisOnce.Do(func() {
			logger.LegacyPrintf("repository.leader_locker", "[LeaderLocker] Redis not configured; using DB advisory lock")
		})
	}
	// Fallback: pg_try_advisory_lock
	return tryAdvisoryLock(ctx, l.db, hashLockID(key))
}
func tryAdvisoryLock(ctx context.Context, db *sql.DB, lockID int64) (func(), bool) {
if db == nil {
return nil, false
}
conn, err := db.Conn(ctx)
if err != nil {
return nil, false
}
acquired := false
if err := conn.QueryRowContext(ctx, "SELECT pg_try_advisory_lock($1)", lockID).Scan(&acquired); err != nil {
_ = conn.Close()
return nil, false
}
if !acquired {
_ = conn.Close()
return nil, false
}
return func() {
unlockCtx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()
_, _ = conn.ExecContext(unlockCtx, "SELECT pg_advisory_unlock($1)", lockID)
_ = conn.Close()
}, true
}
// hashLockID maps a lock key to a stable 64-bit advisory-lock ID using
// FNV-1a; the uint64 digest is reinterpreted as int64 to fit PostgreSQL's
// bigint lock-ID parameter (the value may be negative, which is fine).
func hashLockID(key string) int64 {
	hasher := fnv.New64a()
	_, _ = hasher.Write([]byte(key)) // hash.Hash Write never returns an error
	return int64(hasher.Sum64())
}

View File

@@ -53,9 +53,10 @@ var ProviderSet = wire.NewSet(
NewAPIKeyRepository,
NewGroupRepository,
NewAccountRepository,
NewSoraAccountRepository, // Sora 账号扩展表仓储
NewScheduledTestPlanRepository, // 定时测试计划仓储
NewScheduledTestResultRepository, // 定时测试结果仓储
NewSoraAccountRepository, // Sora 账号扩展表仓储
NewScheduledTestPlanRepository, // 定时测试计划仓储
NewScheduledTestResultRepository, // 定时测试结果仓储
NewLeaderLocker, // 分布式 leader 选举
NewProxyRepository,
NewRedeemCodeRepository,
NewPromoCodeRepository,

View File

@@ -59,3 +59,10 @@ type ScheduledTestResultRepository interface {
ListByPlanID(ctx context.Context, planID int64, limit int) ([]*ScheduledTestResult, error)
PruneOldResults(ctx context.Context, planID int64, keepCount int) error
}
// LeaderLocker provides distributed leader election for background runners.
//
// TryAcquire attempts to acquire the named lock for at most ttl. On success
// it returns a release function and true; when the lock is held by another
// instance it returns nil and false. Callers should nil-check release before
// deferring it, as implementations may return a nil release on success.
type LeaderLocker interface {
	TryAcquire(ctx context.Context, key string, ttl time.Duration) (release func(), ok bool)
}

View File

@@ -2,14 +2,11 @@ package service
import (
"context"
"database/sql"
"sync"
"time"
"github.com/Wei-Shaw/sub2api/internal/config"
"github.com/Wei-Shaw/sub2api/internal/pkg/logger"
"github.com/google/uuid"
"github.com/redis/go-redis/v9"
"github.com/robfig/cron/v3"
)
@@ -19,28 +16,17 @@ const (
scheduledTestDefaultMaxWorkers = 10
)
var scheduledTestReleaseScript = redis.NewScript(`
if redis.call("GET", KEYS[1]) == ARGV[1] then
return redis.call("DEL", KEYS[1])
end
return 0
`)
// ScheduledTestRunnerService periodically scans due test plans and executes them.
type ScheduledTestRunnerService struct {
planRepo ScheduledTestPlanRepository
scheduledSvc *ScheduledTestService
accountTestSvc *AccountTestService
db *sql.DB
redisClient *redis.Client
locker LeaderLocker
cfg *config.Config
instanceID string
cron *cron.Cron
startOnce sync.Once
stopOnce sync.Once
warnNoRedisOnce sync.Once
cron *cron.Cron
startOnce sync.Once
stopOnce sync.Once
}
// NewScheduledTestRunnerService creates a new runner.
@@ -48,18 +34,15 @@ func NewScheduledTestRunnerService(
planRepo ScheduledTestPlanRepository,
scheduledSvc *ScheduledTestService,
accountTestSvc *AccountTestService,
db *sql.DB,
redisClient *redis.Client,
locker LeaderLocker,
cfg *config.Config,
) *ScheduledTestRunnerService {
return &ScheduledTestRunnerService{
planRepo: planRepo,
scheduledSvc: scheduledSvc,
accountTestSvc: accountTestSvc,
db: db,
redisClient: redisClient,
locker: locker,
cfg: cfg,
instanceID: uuid.NewString(),
}
}
@@ -112,12 +95,15 @@ func (s *ScheduledTestRunnerService) runScheduled() {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
release, ok := s.tryAcquireLeaderLock(ctx)
if !ok {
return
}
if release != nil {
defer release()
// Skip leader election in simple mode.
if s.cfg == nil || s.cfg.RunMode != config.RunModeSimple {
release, ok := s.locker.TryAcquire(ctx, scheduledTestLeaderLockKey, scheduledTestLeaderLockTTL)
if !ok {
return
}
if release != nil {
defer release()
}
}
now := time.Now()
@@ -171,37 +157,3 @@ func (s *ScheduledTestRunnerService) runOnePlan(ctx context.Context, plan *Sched
logger.LegacyPrintf("service.scheduled_test_runner", "[ScheduledTestRunner] plan=%d UpdateAfterRun error: %v", plan.ID, err)
}
}
// tryAcquireLeaderLock is the legacy, pre-extraction lock routine (this diff
// removes it in favor of the LeaderLocker interface). It returns a release
// function and true when this instance may act as leader, or nil and false
// when another instance holds the lock.
func (s *ScheduledTestRunnerService) tryAcquireLeaderLock(ctx context.Context) (func(), bool) {
	// Simple (single-instance) mode needs no election: succeed with no lock
	// and therefore no release function.
	if s.cfg != nil && s.cfg.RunMode == config.RunModeSimple {
		return nil, true
	}
	key := scheduledTestLeaderLockKey
	ttl := scheduledTestLeaderLockTTL
	if s.redisClient != nil {
		ok, err := s.redisClient.SetNX(ctx, key, s.instanceID, ttl).Result()
		if err == nil {
			if !ok {
				// Another instance holds the Redis lock.
				return nil, false
			}
			return func() {
				// Compare-and-delete so only the owning instance removes the key.
				_, _ = scheduledTestReleaseScript.Run(ctx, s.redisClient, []string{key}, s.instanceID).Result()
			}, true
		}
		// Redis errored: warn once, then fall through to the DB lock.
		s.warnNoRedisOnce.Do(func() {
			logger.LegacyPrintf("service.scheduled_test_runner", "[ScheduledTestRunner] Redis SetNX failed; falling back to DB advisory lock: %v", err)
		})
	} else {
		s.warnNoRedisOnce.Do(func() {
			logger.LegacyPrintf("service.scheduled_test_runner", "[ScheduledTestRunner] Redis not configured; using DB advisory lock")
		})
	}
	release, ok := tryAcquireDBAdvisoryLock(ctx, s.db, hashAdvisoryLockID(key))
	if !ok {
		return nil, false
	}
	return release, true
}

View File

@@ -287,11 +287,10 @@ func ProvideScheduledTestRunnerService(
planRepo ScheduledTestPlanRepository,
scheduledSvc *ScheduledTestService,
accountTestSvc *AccountTestService,
db *sql.DB,
redisClient *redis.Client,
locker LeaderLocker,
cfg *config.Config,
) *ScheduledTestRunnerService {
svc := NewScheduledTestRunnerService(planRepo, scheduledSvc, accountTestSvc, db, redisClient, cfg)
svc := NewScheduledTestRunnerService(planRepo, scheduledSvc, accountTestSvc, locker, cfg)
svc.Start()
return svc
}