chore(logging): 完成后端日志审计与结构化迁移
- 将高密度服务与处理器日志迁移到新日志系统(LegacyPrintf/结构化日志)
- 增加 stdlog bridge 与兼容测试,保留旧日志捕获能力
- 将 OpenAI 断流告警改为结构化 Warn,并改造对应测试为 sink 捕获
- 补齐后端相关文件 logger 引用,并通过全量 go test
This commit is contained in:
@@ -4,13 +4,13 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"log"
|
||||
"log/slog"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/Wei-Shaw/sub2api/internal/config"
|
||||
"github.com/Wei-Shaw/sub2api/internal/pkg/logger"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -104,7 +104,7 @@ func (s *SchedulerSnapshotService) ListSchedulableAccounts(ctx context.Context,
|
||||
if s.cache != nil {
|
||||
cached, hit, err := s.cache.GetSnapshot(ctx, bucket)
|
||||
if err != nil {
|
||||
log.Printf("[Scheduler] cache read failed: bucket=%s err=%v", bucket.String(), err)
|
||||
logger.LegacyPrintf("service.scheduler_snapshot", "[Scheduler] cache read failed: bucket=%s err=%v", bucket.String(), err)
|
||||
} else if hit {
|
||||
return derefAccounts(cached), useMixed, nil
|
||||
}
|
||||
@@ -124,7 +124,7 @@ func (s *SchedulerSnapshotService) ListSchedulableAccounts(ctx context.Context,
|
||||
|
||||
if s.cache != nil {
|
||||
if err := s.cache.SetSnapshot(fallbackCtx, bucket, accounts); err != nil {
|
||||
log.Printf("[Scheduler] cache write failed: bucket=%s err=%v", bucket.String(), err)
|
||||
logger.LegacyPrintf("service.scheduler_snapshot", "[Scheduler] cache write failed: bucket=%s err=%v", bucket.String(), err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -138,7 +138,7 @@ func (s *SchedulerSnapshotService) GetAccount(ctx context.Context, accountID int
|
||||
if s.cache != nil {
|
||||
account, err := s.cache.GetAccount(ctx, accountID)
|
||||
if err != nil {
|
||||
log.Printf("[Scheduler] account cache read failed: id=%d err=%v", accountID, err)
|
||||
logger.LegacyPrintf("service.scheduler_snapshot", "[Scheduler] account cache read failed: id=%d err=%v", accountID, err)
|
||||
} else if account != nil {
|
||||
return account, nil
|
||||
}
|
||||
@@ -168,17 +168,17 @@ func (s *SchedulerSnapshotService) runInitialRebuild() {
|
||||
defer cancel()
|
||||
buckets, err := s.cache.ListBuckets(ctx)
|
||||
if err != nil {
|
||||
log.Printf("[Scheduler] list buckets failed: %v", err)
|
||||
logger.LegacyPrintf("service.scheduler_snapshot", "[Scheduler] list buckets failed: %v", err)
|
||||
}
|
||||
if len(buckets) == 0 {
|
||||
buckets, err = s.defaultBuckets(ctx)
|
||||
if err != nil {
|
||||
log.Printf("[Scheduler] default buckets failed: %v", err)
|
||||
logger.LegacyPrintf("service.scheduler_snapshot", "[Scheduler] default buckets failed: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
if err := s.rebuildBuckets(ctx, buckets, "startup"); err != nil {
|
||||
log.Printf("[Scheduler] rebuild startup failed: %v", err)
|
||||
logger.LegacyPrintf("service.scheduler_snapshot", "[Scheduler] rebuild startup failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -205,7 +205,7 @@ func (s *SchedulerSnapshotService) runFullRebuildWorker(interval time.Duration)
|
||||
select {
|
||||
case <-ticker.C:
|
||||
if err := s.triggerFullRebuild("interval"); err != nil {
|
||||
log.Printf("[Scheduler] full rebuild failed: %v", err)
|
||||
logger.LegacyPrintf("service.scheduler_snapshot", "[Scheduler] full rebuild failed: %v", err)
|
||||
}
|
||||
case <-s.stopCh:
|
||||
return
|
||||
@@ -222,13 +222,13 @@ func (s *SchedulerSnapshotService) pollOutbox() {
|
||||
|
||||
watermark, err := s.cache.GetOutboxWatermark(ctx)
|
||||
if err != nil {
|
||||
log.Printf("[Scheduler] outbox watermark read failed: %v", err)
|
||||
logger.LegacyPrintf("service.scheduler_snapshot", "[Scheduler] outbox watermark read failed: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
events, err := s.outboxRepo.ListAfter(ctx, watermark, 200)
|
||||
if err != nil {
|
||||
log.Printf("[Scheduler] outbox poll failed: %v", err)
|
||||
logger.LegacyPrintf("service.scheduler_snapshot", "[Scheduler] outbox poll failed: %v", err)
|
||||
return
|
||||
}
|
||||
if len(events) == 0 {
|
||||
@@ -241,14 +241,14 @@ func (s *SchedulerSnapshotService) pollOutbox() {
|
||||
err := s.handleOutboxEvent(eventCtx, event)
|
||||
cancel()
|
||||
if err != nil {
|
||||
log.Printf("[Scheduler] outbox handle failed: id=%d type=%s err=%v", event.ID, event.EventType, err)
|
||||
logger.LegacyPrintf("service.scheduler_snapshot", "[Scheduler] outbox handle failed: id=%d type=%s err=%v", event.ID, event.EventType, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
lastID := events[len(events)-1].ID
|
||||
if err := s.cache.SetOutboxWatermark(ctx, lastID); err != nil {
|
||||
log.Printf("[Scheduler] outbox watermark write failed: %v", err)
|
||||
logger.LegacyPrintf("service.scheduler_snapshot", "[Scheduler] outbox watermark write failed: %v", err)
|
||||
} else {
|
||||
watermarkForCheck = lastID
|
||||
}
|
||||
@@ -445,11 +445,11 @@ func (s *SchedulerSnapshotService) rebuildBucket(ctx context.Context, bucket Sch
|
||||
|
||||
accounts, err := s.loadAccountsFromDB(rebuildCtx, bucket, bucket.Mode == SchedulerModeMixed)
|
||||
if err != nil {
|
||||
log.Printf("[Scheduler] rebuild failed: bucket=%s reason=%s err=%v", bucket.String(), reason, err)
|
||||
logger.LegacyPrintf("service.scheduler_snapshot", "[Scheduler] rebuild failed: bucket=%s reason=%s err=%v", bucket.String(), reason, err)
|
||||
return err
|
||||
}
|
||||
if err := s.cache.SetSnapshot(rebuildCtx, bucket, accounts); err != nil {
|
||||
log.Printf("[Scheduler] rebuild cache failed: bucket=%s reason=%s err=%v", bucket.String(), reason, err)
|
||||
logger.LegacyPrintf("service.scheduler_snapshot", "[Scheduler] rebuild cache failed: bucket=%s reason=%s err=%v", bucket.String(), reason, err)
|
||||
return err
|
||||
}
|
||||
slog.Debug("[Scheduler] rebuild ok", "bucket", bucket.String(), "reason", reason, "size", len(accounts))
|
||||
@@ -465,13 +465,13 @@ func (s *SchedulerSnapshotService) triggerFullRebuild(reason string) error {
|
||||
|
||||
buckets, err := s.cache.ListBuckets(ctx)
|
||||
if err != nil {
|
||||
log.Printf("[Scheduler] list buckets failed: %v", err)
|
||||
logger.LegacyPrintf("service.scheduler_snapshot", "[Scheduler] list buckets failed: %v", err)
|
||||
return err
|
||||
}
|
||||
if len(buckets) == 0 {
|
||||
buckets, err = s.defaultBuckets(ctx)
|
||||
if err != nil {
|
||||
log.Printf("[Scheduler] default buckets failed: %v", err)
|
||||
logger.LegacyPrintf("service.scheduler_snapshot", "[Scheduler] default buckets failed: %v", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -485,7 +485,7 @@ func (s *SchedulerSnapshotService) checkOutboxLag(ctx context.Context, oldest Sc
|
||||
|
||||
lag := time.Since(oldest.CreatedAt)
|
||||
if lagSeconds := int(lag.Seconds()); lagSeconds >= s.cfg.Gateway.Scheduling.OutboxLagWarnSeconds && s.cfg.Gateway.Scheduling.OutboxLagWarnSeconds > 0 {
|
||||
log.Printf("[Scheduler] outbox lag warning: %ds", lagSeconds)
|
||||
logger.LegacyPrintf("service.scheduler_snapshot", "[Scheduler] outbox lag warning: %ds", lagSeconds)
|
||||
}
|
||||
|
||||
if s.cfg.Gateway.Scheduling.OutboxLagRebuildSeconds > 0 && int(lag.Seconds()) >= s.cfg.Gateway.Scheduling.OutboxLagRebuildSeconds {
|
||||
@@ -495,12 +495,12 @@ func (s *SchedulerSnapshotService) checkOutboxLag(ctx context.Context, oldest Sc
|
||||
s.lagMu.Unlock()
|
||||
|
||||
if failures >= s.cfg.Gateway.Scheduling.OutboxLagRebuildFailures {
|
||||
log.Printf("[Scheduler] outbox lag rebuild triggered: lag=%s failures=%d", lag, failures)
|
||||
logger.LegacyPrintf("service.scheduler_snapshot", "[Scheduler] outbox lag rebuild triggered: lag=%s failures=%d", lag, failures)
|
||||
s.lagMu.Lock()
|
||||
s.lagFailures = 0
|
||||
s.lagMu.Unlock()
|
||||
if err := s.triggerFullRebuild("outbox_lag"); err != nil {
|
||||
log.Printf("[Scheduler] outbox lag rebuild failed: %v", err)
|
||||
logger.LegacyPrintf("service.scheduler_snapshot", "[Scheduler] outbox lag rebuild failed: %v", err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -518,9 +518,9 @@ func (s *SchedulerSnapshotService) checkOutboxLag(ctx context.Context, oldest Sc
|
||||
return
|
||||
}
|
||||
if maxID-watermark >= int64(threshold) {
|
||||
log.Printf("[Scheduler] outbox backlog rebuild triggered: backlog=%d", maxID-watermark)
|
||||
logger.LegacyPrintf("service.scheduler_snapshot", "[Scheduler] outbox backlog rebuild triggered: backlog=%d", maxID-watermark)
|
||||
if err := s.triggerFullRebuild("outbox_backlog"); err != nil {
|
||||
log.Printf("[Scheduler] outbox backlog rebuild failed: %v", err)
|
||||
logger.LegacyPrintf("service.scheduler_snapshot", "[Scheduler] outbox backlog rebuild failed: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user