first commit
172
backend/migrations/001_init.sql
Normal file
@@ -0,0 +1,172 @@
-- Sub2API initial database migration script
-- PostgreSQL 15+

-- 1. proxies: proxy IP table (no foreign-key dependencies)
CREATE TABLE IF NOT EXISTS proxies (
    id BIGSERIAL PRIMARY KEY,
    name VARCHAR(100) NOT NULL,
    protocol VARCHAR(20) NOT NULL, -- http/https/socks5
    host VARCHAR(255) NOT NULL,
    port INT NOT NULL,
    username VARCHAR(100),
    password VARCHAR(100),
    status VARCHAR(20) NOT NULL DEFAULT 'active', -- active/disabled
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    deleted_at TIMESTAMPTZ
);

CREATE INDEX IF NOT EXISTS idx_proxies_status ON proxies(status);
CREATE INDEX IF NOT EXISTS idx_proxies_deleted_at ON proxies(deleted_at);

-- 2. groups: group table (no foreign-key dependencies)
CREATE TABLE IF NOT EXISTS groups (
    id BIGSERIAL PRIMARY KEY,
    name VARCHAR(100) NOT NULL UNIQUE,
    description TEXT,
    rate_multiplier DECIMAL(10, 4) NOT NULL DEFAULT 1.0, -- rate multiplier
    is_exclusive BOOLEAN NOT NULL DEFAULT FALSE, -- whether this is an exclusive group
    status VARCHAR(20) NOT NULL DEFAULT 'active', -- active/disabled
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    deleted_at TIMESTAMPTZ
);

CREATE INDEX IF NOT EXISTS idx_groups_name ON groups(name);
CREATE INDEX IF NOT EXISTS idx_groups_status ON groups(status);
CREATE INDEX IF NOT EXISTS idx_groups_is_exclusive ON groups(is_exclusive);
CREATE INDEX IF NOT EXISTS idx_groups_deleted_at ON groups(deleted_at);

-- 3. users: user table (no foreign-key dependencies)
CREATE TABLE IF NOT EXISTS users (
    id BIGSERIAL PRIMARY KEY,
    email VARCHAR(255) NOT NULL UNIQUE,
    password_hash VARCHAR(255) NOT NULL,
    role VARCHAR(20) NOT NULL DEFAULT 'user', -- admin/user
    balance DECIMAL(20, 8) NOT NULL DEFAULT 0, -- balance (may be negative)
    concurrency INT NOT NULL DEFAULT 5, -- concurrency limit
    status VARCHAR(20) NOT NULL DEFAULT 'active', -- active/disabled
    allowed_groups BIGINT[] DEFAULT NULL, -- group IDs the user may bind to
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    deleted_at TIMESTAMPTZ
);

CREATE INDEX IF NOT EXISTS idx_users_email ON users(email);
CREATE INDEX IF NOT EXISTS idx_users_status ON users(status);
CREATE INDEX IF NOT EXISTS idx_users_deleted_at ON users(deleted_at);

-- 4. accounts: upstream account table (depends on proxies)
CREATE TABLE IF NOT EXISTS accounts (
    id BIGSERIAL PRIMARY KEY,
    name VARCHAR(100) NOT NULL,
    platform VARCHAR(50) NOT NULL, -- anthropic/openai/gemini
    type VARCHAR(20) NOT NULL, -- oauth/apikey
    credentials JSONB NOT NULL DEFAULT '{}', -- credentials (stored encrypted)
    extra JSONB NOT NULL DEFAULT '{}', -- extra metadata
    proxy_id BIGINT REFERENCES proxies(id) ON DELETE SET NULL,
    concurrency INT NOT NULL DEFAULT 3, -- per-account concurrency limit
    priority INT NOT NULL DEFAULT 50, -- scheduling priority (1-100, lower = higher)
    status VARCHAR(20) NOT NULL DEFAULT 'active', -- active/disabled/error
    error_message TEXT,
    last_used_at TIMESTAMPTZ,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    deleted_at TIMESTAMPTZ
);

CREATE INDEX IF NOT EXISTS idx_accounts_platform ON accounts(platform);
CREATE INDEX IF NOT EXISTS idx_accounts_type ON accounts(type);
CREATE INDEX IF NOT EXISTS idx_accounts_status ON accounts(status);
CREATE INDEX IF NOT EXISTS idx_accounts_proxy_id ON accounts(proxy_id);
CREATE INDEX IF NOT EXISTS idx_accounts_priority ON accounts(priority);
CREATE INDEX IF NOT EXISTS idx_accounts_last_used_at ON accounts(last_used_at);
CREATE INDEX IF NOT EXISTS idx_accounts_deleted_at ON accounts(deleted_at);

-- 5. api_keys: API key table (depends on users, groups)
CREATE TABLE IF NOT EXISTS api_keys (
    id BIGSERIAL PRIMARY KEY,
    user_id BIGINT NOT NULL REFERENCES users(id) ON DELETE CASCADE,
    key VARCHAR(64) NOT NULL UNIQUE, -- sk-xxx format
    name VARCHAR(100) NOT NULL,
    group_id BIGINT REFERENCES groups(id) ON DELETE SET NULL,
    status VARCHAR(20) NOT NULL DEFAULT 'active', -- active/disabled
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    deleted_at TIMESTAMPTZ
);

CREATE INDEX IF NOT EXISTS idx_api_keys_key ON api_keys(key);
CREATE INDEX IF NOT EXISTS idx_api_keys_user_id ON api_keys(user_id);
CREATE INDEX IF NOT EXISTS idx_api_keys_group_id ON api_keys(group_id);
CREATE INDEX IF NOT EXISTS idx_api_keys_status ON api_keys(status);
CREATE INDEX IF NOT EXISTS idx_api_keys_deleted_at ON api_keys(deleted_at);

-- 6. account_groups: account-group join table (depends on accounts, groups)
CREATE TABLE IF NOT EXISTS account_groups (
    account_id BIGINT NOT NULL REFERENCES accounts(id) ON DELETE CASCADE,
    group_id BIGINT NOT NULL REFERENCES groups(id) ON DELETE CASCADE,
    priority INT NOT NULL DEFAULT 50, -- priority within the group
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    PRIMARY KEY (account_id, group_id)
);

CREATE INDEX IF NOT EXISTS idx_account_groups_group_id ON account_groups(group_id);
CREATE INDEX IF NOT EXISTS idx_account_groups_priority ON account_groups(priority);

-- 7. redeem_codes: redeem code table (depends on users)
CREATE TABLE IF NOT EXISTS redeem_codes (
    id BIGSERIAL PRIMARY KEY,
    code VARCHAR(32) NOT NULL UNIQUE, -- redemption code
    type VARCHAR(20) NOT NULL DEFAULT 'balance', -- balance
    value DECIMAL(20, 8) NOT NULL, -- face value (USD)
    status VARCHAR(20) NOT NULL DEFAULT 'unused', -- unused/used
    used_by BIGINT REFERENCES users(id) ON DELETE SET NULL,
    used_at TIMESTAMPTZ,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

CREATE INDEX IF NOT EXISTS idx_redeem_codes_code ON redeem_codes(code);
CREATE INDEX IF NOT EXISTS idx_redeem_codes_status ON redeem_codes(status);
CREATE INDEX IF NOT EXISTS idx_redeem_codes_used_by ON redeem_codes(used_by);

-- 8. usage_logs: usage log table (depends on users, api_keys, accounts)
CREATE TABLE IF NOT EXISTS usage_logs (
    id BIGSERIAL PRIMARY KEY,
    user_id BIGINT NOT NULL REFERENCES users(id) ON DELETE CASCADE,
    api_key_id BIGINT NOT NULL REFERENCES api_keys(id) ON DELETE CASCADE,
    account_id BIGINT NOT NULL REFERENCES accounts(id) ON DELETE CASCADE,
    request_id VARCHAR(64),
    model VARCHAR(100) NOT NULL,

    -- Token usage (4 kinds)
    input_tokens INT NOT NULL DEFAULT 0,
    output_tokens INT NOT NULL DEFAULT 0,
    cache_creation_tokens INT NOT NULL DEFAULT 0,
    cache_read_tokens INT NOT NULL DEFAULT 0,

    -- Detailed cache-creation breakdown
    cache_creation_5m_tokens INT NOT NULL DEFAULT 0,
    cache_creation_1h_tokens INT NOT NULL DEFAULT 0,

    -- Costs (USD)
    input_cost DECIMAL(20, 10) NOT NULL DEFAULT 0,
    output_cost DECIMAL(20, 10) NOT NULL DEFAULT 0,
    cache_creation_cost DECIMAL(20, 10) NOT NULL DEFAULT 0,
    cache_read_cost DECIMAL(20, 10) NOT NULL DEFAULT 0,
    total_cost DECIMAL(20, 10) NOT NULL DEFAULT 0, -- raw total cost
    actual_cost DECIMAL(20, 10) NOT NULL DEFAULT 0, -- cost actually charged

    -- Metadata
    stream BOOLEAN NOT NULL DEFAULT FALSE,
    duration_ms INT,

    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

CREATE INDEX IF NOT EXISTS idx_usage_logs_user_id ON usage_logs(user_id);
CREATE INDEX IF NOT EXISTS idx_usage_logs_api_key_id ON usage_logs(api_key_id);
CREATE INDEX IF NOT EXISTS idx_usage_logs_account_id ON usage_logs(account_id);
CREATE INDEX IF NOT EXISTS idx_usage_logs_model ON usage_logs(model);
CREATE INDEX IF NOT EXISTS idx_usage_logs_created_at ON usage_logs(created_at);
CREATE INDEX IF NOT EXISTS idx_usage_logs_user_created ON usage_logs(user_id, created_at);
33
backend/migrations/002_account_type_migration.sql
Normal file
@@ -0,0 +1,33 @@
-- Sub2API account type migration script
-- Migrates 'official' accounts to 'oauth' or 'setup-token',
-- decided by the credentials->>'scope' field:
-- - scopes containing 'user:profile' become 'oauth'
-- - scopes with only 'user:inference' become 'setup-token'

-- 1. Migrate official accounts whose scope contains the profile scope to oauth
UPDATE accounts
SET type = 'oauth',
    updated_at = NOW()
WHERE type = 'official'
  AND credentials->>'scope' LIKE '%user:profile%';

-- 2. Migrate official accounts with only the inference scope to setup-token
-- (accounts with a missing or empty scope are handled in step 3 instead)
UPDATE accounts
SET type = 'setup-token',
    updated_at = NOW()
WHERE type = 'official'
  AND COALESCE(credentials->>'scope', '') <> ''
  AND (
    credentials->>'scope' = 'user:inference'
    OR credentials->>'scope' NOT LIKE '%user:profile%'
  );

-- 3. Handle legacy accounts without a scope field (default to oauth)
UPDATE accounts
SET type = 'oauth',
    updated_at = NOW()
WHERE type = 'official'
  AND (credentials->>'scope' IS NULL OR credentials->>'scope' = '');

-- 4. Verify the migration (check whether any official accounts remain)
-- SELECT COUNT(*) FROM accounts WHERE type = 'official';
-- A result of 0 means the migration succeeded
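-- A broader sanity check (illustrative sketch, read-only; safe to run at any time):
-- SELECT type, COUNT(*) FROM accounts GROUP BY type;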
65
backend/migrations/003_subscription.sql
Normal file
@@ -0,0 +1,65 @@
-- Sub2API subscription feature migration script
-- Adds subscription groups and user subscriptions

-- 1. Extend the groups table with subscription-related fields
ALTER TABLE groups ADD COLUMN IF NOT EXISTS platform VARCHAR(50) NOT NULL DEFAULT 'anthropic';
ALTER TABLE groups ADD COLUMN IF NOT EXISTS subscription_type VARCHAR(20) NOT NULL DEFAULT 'standard';
ALTER TABLE groups ADD COLUMN IF NOT EXISTS daily_limit_usd DECIMAL(20, 8) DEFAULT NULL;
ALTER TABLE groups ADD COLUMN IF NOT EXISTS weekly_limit_usd DECIMAL(20, 8) DEFAULT NULL;
ALTER TABLE groups ADD COLUMN IF NOT EXISTS monthly_limit_usd DECIMAL(20, 8) DEFAULT NULL;
ALTER TABLE groups ADD COLUMN IF NOT EXISTS default_validity_days INT NOT NULL DEFAULT 30;

-- Indexes
CREATE INDEX IF NOT EXISTS idx_groups_platform ON groups(platform);
CREATE INDEX IF NOT EXISTS idx_groups_subscription_type ON groups(subscription_type);

-- 2. Create the user_subscriptions table
CREATE TABLE IF NOT EXISTS user_subscriptions (
    id BIGSERIAL PRIMARY KEY,
    user_id BIGINT NOT NULL REFERENCES users(id) ON DELETE CASCADE,
    group_id BIGINT NOT NULL REFERENCES groups(id) ON DELETE CASCADE,

    -- Subscription validity period
    starts_at TIMESTAMPTZ NOT NULL,
    expires_at TIMESTAMPTZ NOT NULL,
    status VARCHAR(20) NOT NULL DEFAULT 'active', -- active/expired/suspended

    -- Sliding-window start times (NULL = not yet activated)
    daily_window_start TIMESTAMPTZ,
    weekly_window_start TIMESTAMPTZ,
    monthly_window_start TIMESTAMPTZ,

    -- Usage within the current window (USD, computed from total_cost)
    daily_usage_usd DECIMAL(20, 10) NOT NULL DEFAULT 0,
    weekly_usage_usd DECIMAL(20, 10) NOT NULL DEFAULT 0,
    monthly_usage_usd DECIMAL(20, 10) NOT NULL DEFAULT 0,

    -- Admin assignment info
    assigned_by BIGINT REFERENCES users(id) ON DELETE SET NULL,
    assigned_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    notes TEXT,

    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    -- Unique constraint: each user can hold only one subscription per group
    UNIQUE(user_id, group_id)
);

-- user_subscriptions indexes
CREATE INDEX IF NOT EXISTS idx_user_subscriptions_user_id ON user_subscriptions(user_id);
CREATE INDEX IF NOT EXISTS idx_user_subscriptions_group_id ON user_subscriptions(group_id);
CREATE INDEX IF NOT EXISTS idx_user_subscriptions_status ON user_subscriptions(status);
CREATE INDEX IF NOT EXISTS idx_user_subscriptions_expires_at ON user_subscriptions(expires_at);
CREATE INDEX IF NOT EXISTS idx_user_subscriptions_assigned_by ON user_subscriptions(assigned_by);

-- 3. Extend usage_logs with group and subscription references
ALTER TABLE usage_logs ADD COLUMN IF NOT EXISTS group_id BIGINT REFERENCES groups(id) ON DELETE SET NULL;
ALTER TABLE usage_logs ADD COLUMN IF NOT EXISTS subscription_id BIGINT REFERENCES user_subscriptions(id) ON DELETE SET NULL;
ALTER TABLE usage_logs ADD COLUMN IF NOT EXISTS rate_multiplier DECIMAL(10, 4) NOT NULL DEFAULT 1;
ALTER TABLE usage_logs ADD COLUMN IF NOT EXISTS first_token_ms INT;

-- New usage_logs indexes
CREATE INDEX IF NOT EXISTS idx_usage_logs_group_id ON usage_logs(group_id);
CREATE INDEX IF NOT EXISTS idx_usage_logs_subscription_id ON usage_logs(subscription_id);
CREATE INDEX IF NOT EXISTS idx_usage_logs_sub_created ON usage_logs(subscription_id, created_at);
6
backend/migrations/004_add_redeem_code_notes.sql
Normal file
@@ -0,0 +1,6 @@
-- Add a notes field to redeem_codes

ALTER TABLE redeem_codes
    ADD COLUMN IF NOT EXISTS notes TEXT DEFAULT NULL;

COMMENT ON COLUMN redeem_codes.notes IS 'Notes (reason recorded when an admin makes an adjustment)';
42
backend/migrations/005_schema_parity.sql
Normal file
@@ -0,0 +1,42 @@
-- Align SQL migrations with current GORM persistence models.
-- This file is designed to be safe on both fresh installs and existing databases.

-- users: add fields added after initial migration
ALTER TABLE users ADD COLUMN IF NOT EXISTS username VARCHAR(100) NOT NULL DEFAULT '';
ALTER TABLE users ADD COLUMN IF NOT EXISTS wechat VARCHAR(100) NOT NULL DEFAULT '';
ALTER TABLE users ADD COLUMN IF NOT EXISTS notes TEXT NOT NULL DEFAULT '';

-- api_keys: allow longer keys (GORM model uses size:128)
ALTER TABLE api_keys ALTER COLUMN key TYPE VARCHAR(128);

-- accounts: scheduling and rate-limit fields used by repository queries
ALTER TABLE accounts ADD COLUMN IF NOT EXISTS schedulable BOOLEAN NOT NULL DEFAULT TRUE;
ALTER TABLE accounts ADD COLUMN IF NOT EXISTS rate_limited_at TIMESTAMPTZ;
ALTER TABLE accounts ADD COLUMN IF NOT EXISTS rate_limit_reset_at TIMESTAMPTZ;
ALTER TABLE accounts ADD COLUMN IF NOT EXISTS overload_until TIMESTAMPTZ;
ALTER TABLE accounts ADD COLUMN IF NOT EXISTS session_window_start TIMESTAMPTZ;
ALTER TABLE accounts ADD COLUMN IF NOT EXISTS session_window_end TIMESTAMPTZ;
ALTER TABLE accounts ADD COLUMN IF NOT EXISTS session_window_status VARCHAR(20);

CREATE INDEX IF NOT EXISTS idx_accounts_schedulable ON accounts(schedulable);
CREATE INDEX IF NOT EXISTS idx_accounts_rate_limited_at ON accounts(rate_limited_at);
CREATE INDEX IF NOT EXISTS idx_accounts_rate_limit_reset_at ON accounts(rate_limit_reset_at);
CREATE INDEX IF NOT EXISTS idx_accounts_overload_until ON accounts(overload_until);

-- redeem_codes: subscription redeem fields
ALTER TABLE redeem_codes ADD COLUMN IF NOT EXISTS group_id BIGINT REFERENCES groups(id) ON DELETE SET NULL;
ALTER TABLE redeem_codes ADD COLUMN IF NOT EXISTS validity_days INT NOT NULL DEFAULT 30;
CREATE INDEX IF NOT EXISTS idx_redeem_codes_group_id ON redeem_codes(group_id);

-- usage_logs: billing type used by filters and stats
ALTER TABLE usage_logs ADD COLUMN IF NOT EXISTS billing_type SMALLINT NOT NULL DEFAULT 0;
CREATE INDEX IF NOT EXISTS idx_usage_logs_billing_type ON usage_logs(billing_type);

-- settings: key-value store
CREATE TABLE IF NOT EXISTS settings (
    id BIGSERIAL PRIMARY KEY,
    key VARCHAR(100) NOT NULL UNIQUE,
    value TEXT NOT NULL,
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
@@ -0,0 +1,10 @@
-- Fix legacy subscription records with invalid expires_at (year > 2099).
DO $$
BEGIN
    IF to_regclass('public.user_subscriptions') IS NOT NULL THEN
        UPDATE user_subscriptions
        SET expires_at = TIMESTAMPTZ '2099-12-31 23:59:59+00'
        WHERE expires_at > TIMESTAMPTZ '2099-12-31 23:59:59+00';
    END IF;
END $$;
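-- Illustrative verification query (not part of the migration); it should return 0 afterwards:
-- SELECT COUNT(*) FROM user_subscriptions WHERE expires_at > TIMESTAMPTZ '2099-12-31 23:59:59+00';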
20
backend/migrations/007_add_user_allowed_groups.sql
Normal file
@@ -0,0 +1,20 @@
-- Add user_allowed_groups join table to replace users.allowed_groups (BIGINT[]).
-- Phase 1: create table + backfill from the legacy array column.

CREATE TABLE IF NOT EXISTS user_allowed_groups (
    user_id BIGINT NOT NULL REFERENCES users(id) ON DELETE CASCADE,
    group_id BIGINT NOT NULL REFERENCES groups(id) ON DELETE CASCADE,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    PRIMARY KEY (user_id, group_id)
);

CREATE INDEX IF NOT EXISTS idx_user_allowed_groups_group_id ON user_allowed_groups(group_id);

-- Backfill from the legacy users.allowed_groups array.
INSERT INTO user_allowed_groups (user_id, group_id)
SELECT u.id, x.group_id
FROM users u
CROSS JOIN LATERAL unnest(u.allowed_groups) AS x(group_id)
JOIN groups g ON g.id = x.group_id
WHERE u.allowed_groups IS NOT NULL
ON CONFLICT DO NOTHING;
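-- Illustrative backfill check (read-only sketch; assumes the legacy array column still exists
-- at this point). The two counts may legitimately differ by the number of orphan group IDs
-- that the JOIN against groups filters out:
-- SELECT COUNT(*) FROM user_allowed_groups;
-- SELECT COALESCE(SUM(cardinality(allowed_groups)), 0) FROM users WHERE allowed_groups IS NOT NULL;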
4
backend/migrations/008_seed_default_group.sql
Normal file
@@ -0,0 +1,4 @@
-- Seed a default group for fresh installs.
INSERT INTO groups (name, description, created_at, updated_at)
SELECT 'default', 'Default group', NOW(), NOW()
WHERE NOT EXISTS (SELECT 1 FROM groups);
37
backend/migrations/009_fix_usage_logs_cache_columns.sql
Normal file
@@ -0,0 +1,37 @@
-- Ensure usage_logs cache token columns use the underscored names expected by code.
-- Backfill from legacy column names if they exist.

ALTER TABLE usage_logs
    ADD COLUMN IF NOT EXISTS cache_creation_5m_tokens INT NOT NULL DEFAULT 0;

ALTER TABLE usage_logs
    ADD COLUMN IF NOT EXISTS cache_creation_1h_tokens INT NOT NULL DEFAULT 0;

DO $$
BEGIN
    IF EXISTS (
        SELECT 1
        FROM information_schema.columns
        WHERE table_schema = 'public'
          AND table_name = 'usage_logs'
          AND column_name = 'cache_creation5m_tokens'
    ) THEN
        UPDATE usage_logs
        SET cache_creation_5m_tokens = cache_creation5m_tokens
        WHERE cache_creation_5m_tokens = 0
          AND cache_creation5m_tokens <> 0;
    END IF;

    IF EXISTS (
        SELECT 1
        FROM information_schema.columns
        WHERE table_schema = 'public'
          AND table_name = 'usage_logs'
          AND column_name = 'cache_creation1h_tokens'
    ) THEN
        UPDATE usage_logs
        SET cache_creation_1h_tokens = cache_creation1h_tokens
        WHERE cache_creation_1h_tokens = 0
          AND cache_creation1h_tokens <> 0;
    END IF;
END $$;
@@ -0,0 +1,4 @@
-- Additional composite indexes for aggregate queries
CREATE INDEX IF NOT EXISTS idx_usage_logs_account_created_at ON usage_logs(account_id, created_at);
CREATE INDEX IF NOT EXISTS idx_usage_logs_api_key_created_at ON usage_logs(api_key_id, created_at);
CREATE INDEX IF NOT EXISTS idx_usage_logs_model_created_at ON usage_logs(model, created_at);
39
backend/migrations/011_remove_duplicate_unique_indexes.sql
Normal file
@@ -0,0 +1,39 @@
-- 011_remove_duplicate_unique_indexes.sql
-- Remove duplicate unique indexes.
-- These fields already declare .Unique() in the ent schema's Fields(),
-- so declaring index.Fields("x").Unique() again in Indexes() creates a duplicate index.
-- This migration cleans up those redundant indexes.

-- Duplicate index naming conventions (auto-generated by Ent / left over from earlier migrations):
-- - Index created by field-level Unique(): <table>_<field>_key
-- - Index created by Unique() in Indexes(): <table>_<field>
-- - Non-unique index from the initial migration: idx_<table>_<field>

-- Only drop an index when it exists (idempotent)

-- api_keys table: key column
DROP INDEX IF EXISTS apikey_key;
DROP INDEX IF EXISTS api_keys_key;
DROP INDEX IF EXISTS idx_api_keys_key;

-- users table: email column
DROP INDEX IF EXISTS user_email;
DROP INDEX IF EXISTS users_email;
DROP INDEX IF EXISTS idx_users_email;

-- settings table: key column
DROP INDEX IF EXISTS settings_key;
DROP INDEX IF EXISTS idx_settings_key;

-- redeem_codes table: code column
DROP INDEX IF EXISTS redeemcode_code;
DROP INDEX IF EXISTS redeem_codes_code;
DROP INDEX IF EXISTS idx_redeem_codes_code;

-- groups table: name column
DROP INDEX IF EXISTS group_name;
DROP INDEX IF EXISTS groups_name;
DROP INDEX IF EXISTS idx_groups_name;

-- Note: each column's uniqueness is still enforced by the constraint created by
-- field-level Unique(), e.g. api_keys_key_key, users_email_key.
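-- Illustrative follow-up check (read-only): list the indexes that remain on the affected tables:
-- SELECT tablename, indexname FROM pg_indexes
-- WHERE tablename IN ('api_keys', 'users', 'settings', 'redeem_codes', 'groups')
-- ORDER BY tablename, indexname;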
13
backend/migrations/012_add_user_subscription_soft_delete.sql
Normal file
@@ -0,0 +1,13 @@
-- 012: Add soft-delete support to user_subscriptions
-- Task: fix-medium-data-hygiene 1.1

-- Add the deleted_at column
ALTER TABLE user_subscriptions
    ADD COLUMN IF NOT EXISTS deleted_at TIMESTAMPTZ DEFAULT NULL;

-- Index deleted_at to speed up soft-delete queries
CREATE INDEX IF NOT EXISTS usersubscription_deleted_at
    ON user_subscriptions (deleted_at);

-- Column comment: keeps parity with other soft-deleted entities
COMMENT ON COLUMN user_subscriptions.deleted_at IS 'Soft-delete timestamp; NULL means the row is not deleted';
32
backend/migrations/013_log_orphan_allowed_groups.sql
Normal file
@@ -0,0 +1,32 @@
-- 013: Record orphan group_ids referenced by users.allowed_groups
-- Task: fix-medium-data-hygiene 3.1
--
-- Purpose: before dropping the legacy allowed_groups column, record every orphan entry
-- that references a group which no longer exists.
-- These records can be used for auditing or later data repair.

-- Audit table for orphan allowed_groups entries
CREATE TABLE IF NOT EXISTS orphan_allowed_groups_audit (
    id BIGSERIAL PRIMARY KEY,
    user_id BIGINT NOT NULL,
    group_id BIGINT NOT NULL,
    recorded_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    UNIQUE (user_id, group_id)
);

-- Record orphan group_ids (present in users.allowed_groups but missing from groups)
INSERT INTO orphan_allowed_groups_audit (user_id, group_id)
SELECT u.id, x.group_id
FROM users u
CROSS JOIN LATERAL unnest(u.allowed_groups) AS x(group_id)
LEFT JOIN groups g ON g.id = x.group_id
WHERE u.allowed_groups IS NOT NULL
  AND g.id IS NULL
ON CONFLICT (user_id, group_id) DO NOTHING;

-- Index for lookups
CREATE INDEX IF NOT EXISTS idx_orphan_allowed_groups_audit_user_id
    ON orphan_allowed_groups_audit(user_id);

-- Document the table's purpose
COMMENT ON TABLE orphan_allowed_groups_audit IS
    'Audit table: group_ids referenced by users.allowed_groups that do not exist in groups, recorded before data cleanup';
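-- Illustrative review query (read-only): inspect what was captured before cleanup:
-- SELECT user_id, group_id, recorded_at FROM orphan_allowed_groups_audit ORDER BY recorded_at DESC;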
15
backend/migrations/014_drop_legacy_allowed_groups.sql
Normal file
@@ -0,0 +1,15 @@
-- 014: Drop the legacy users.allowed_groups column
-- Task: fix-medium-data-hygiene 3.3
--
-- Prerequisites:
-- - Migration 007 has backfilled the data into the user_allowed_groups join table
-- - Migration 013 has recorded all orphan group_ids in the audit table
-- - Application code no longer writes to this column (3.2 done)
--
-- The column is now obsolete; all reads and writes go through the user_allowed_groups join table.

-- Drop the allowed_groups column
ALTER TABLE users DROP COLUMN IF EXISTS allowed_groups;

-- Record why the column was dropped
COMMENT ON TABLE users IS 'Users table. Note: the former allowed_groups BIGINT[] column has been migrated to the user_allowed_groups join table';
19
backend/migrations/015_fix_settings_unique_constraint.sql
Normal file
@@ -0,0 +1,19 @@
-- 015_fix_settings_unique_constraint.sql
-- Fix the missing unique constraint on settings.key.
-- The constraint is required by ON CONFLICT ("key") DO UPDATE statements.

-- Add the unique constraint if it does not exist
DO $$
BEGIN
    -- Check whether the unique constraint already exists
    IF NOT EXISTS (
        SELECT 1 FROM pg_constraint
        WHERE conrelid = 'settings'::regclass
          AND contype = 'u'
          AND conname = 'settings_key_key'
    ) THEN
        -- Add the unique constraint
        ALTER TABLE settings ADD CONSTRAINT settings_key_key UNIQUE (key);
    END IF;
END
$$;
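-- The upserts that rely on this constraint look roughly like the following
-- (illustrative sketch with placeholder values; not executed by this migration):
-- INSERT INTO settings ("key", value, updated_at)
-- VALUES ('some_key', 'some_value', NOW())
-- ON CONFLICT ("key") DO UPDATE SET value = EXCLUDED.value, updated_at = NOW();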
@@ -0,0 +1,51 @@
-- 016_soft_delete_partial_unique_indexes.sql
-- Fix the conflict between soft delete and unique constraints.
-- Replace plain unique constraints with partial unique indexes (WHERE deleted_at IS NULL),
-- so soft-deleted rows no longer occupy a unique slot and the same name/email/subscription
-- relationship can be recreated after deletion.

-- ============================================================================
-- 1. users table: email column
-- ============================================================================

-- Drop the old unique constraint (under its possible names)
ALTER TABLE users DROP CONSTRAINT IF EXISTS users_email_key;
DROP INDEX IF EXISTS users_email_key;
DROP INDEX IF EXISTS user_email_key;

-- Create a partial unique index: uniqueness applies only to non-deleted rows
CREATE UNIQUE INDEX IF NOT EXISTS users_email_unique_active
    ON users(email)
    WHERE deleted_at IS NULL;

-- ============================================================================
-- 2. groups table: name column
-- ============================================================================

-- Drop the old unique constraint
ALTER TABLE groups DROP CONSTRAINT IF EXISTS groups_name_key;
DROP INDEX IF EXISTS groups_name_key;
DROP INDEX IF EXISTS group_name_key;

-- Create a partial unique index
CREATE UNIQUE INDEX IF NOT EXISTS groups_name_unique_active
    ON groups(name)
    WHERE deleted_at IS NULL;

-- ============================================================================
-- 3. user_subscriptions table: (user_id, group_id) composite
-- ============================================================================

-- Drop the old unique constraint/index
ALTER TABLE user_subscriptions DROP CONSTRAINT IF EXISTS user_subscriptions_user_id_group_id_key;
DROP INDEX IF EXISTS user_subscriptions_user_id_group_id_key;
DROP INDEX IF EXISTS usersubscription_user_id_group_id;

-- Create a partial unique index
CREATE UNIQUE INDEX IF NOT EXISTS user_subscriptions_user_group_unique_active
    ON user_subscriptions(user_id, group_id)
    WHERE deleted_at IS NULL;

-- ============================================================================
-- Note: api_keys.key keeps its plain unique constraint.
-- An API key must never be reusable, even after soft delete (security consideration).
-- ============================================================================
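-- Behaviour sketch (illustrative only, not executed here): once the partial index is in place,
-- soft-deleting a row frees its unique slot, so the same value can be inserted again:
-- UPDATE users SET deleted_at = NOW() WHERE email = 'someone@example.com';
-- INSERT INTO users (email, password_hash) VALUES ('someone@example.com', '<hash>'); -- no longer conflicts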
48
backend/migrations/018_user_attributes.sql
Normal file
@@ -0,0 +1,48 @@
-- Add user attribute definitions and values tables for custom user attributes.

-- User Attribute Definitions table (with soft delete support)
CREATE TABLE IF NOT EXISTS user_attribute_definitions (
    id BIGSERIAL PRIMARY KEY,
    key VARCHAR(100) NOT NULL,
    name VARCHAR(255) NOT NULL,
    description TEXT DEFAULT '',
    type VARCHAR(20) NOT NULL,
    options JSONB DEFAULT '[]'::jsonb,
    required BOOLEAN NOT NULL DEFAULT FALSE,
    validation JSONB DEFAULT '{}'::jsonb,
    placeholder VARCHAR(255) DEFAULT '',
    display_order INT NOT NULL DEFAULT 0,
    enabled BOOLEAN NOT NULL DEFAULT TRUE,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    deleted_at TIMESTAMPTZ
);

-- Partial unique index for key (only for non-deleted records)
-- Allows reusing keys after soft delete
CREATE UNIQUE INDEX IF NOT EXISTS idx_user_attribute_definitions_key_unique
    ON user_attribute_definitions(key) WHERE deleted_at IS NULL;

CREATE INDEX IF NOT EXISTS idx_user_attribute_definitions_enabled
    ON user_attribute_definitions(enabled);
CREATE INDEX IF NOT EXISTS idx_user_attribute_definitions_display_order
    ON user_attribute_definitions(display_order);
CREATE INDEX IF NOT EXISTS idx_user_attribute_definitions_deleted_at
    ON user_attribute_definitions(deleted_at);

-- User Attribute Values table (hard delete only, no deleted_at)
CREATE TABLE IF NOT EXISTS user_attribute_values (
    id BIGSERIAL PRIMARY KEY,
    user_id BIGINT NOT NULL REFERENCES users(id) ON DELETE CASCADE,
    attribute_id BIGINT NOT NULL REFERENCES user_attribute_definitions(id) ON DELETE CASCADE,
    value TEXT DEFAULT '',
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    UNIQUE(user_id, attribute_id)
);

CREATE INDEX IF NOT EXISTS idx_user_attribute_values_user_id
    ON user_attribute_values(user_id);
CREATE INDEX IF NOT EXISTS idx_user_attribute_values_attribute_id
    ON user_attribute_values(attribute_id);
83
backend/migrations/019_migrate_wechat_to_attributes.sql
Normal file
@@ -0,0 +1,83 @@
-- Migration: Move wechat field from users table to user_attribute_values
-- This migration:
-- 1. Creates a "wechat" attribute definition
-- 2. Migrates existing wechat data to user_attribute_values
-- 3. Drops the now-redundant wechat column from users (the Down migration restores it)

-- +goose Up
-- +goose StatementBegin

-- Step 1: Insert wechat attribute definition if not exists
INSERT INTO user_attribute_definitions (key, name, description, type, options, required, validation, placeholder, display_order, enabled, created_at, updated_at)
SELECT 'wechat', '微信', '用户微信号', 'text', '[]'::jsonb, false, '{}'::jsonb, '请输入微信号', 0, true, NOW(), NOW()
WHERE NOT EXISTS (
    SELECT 1 FROM user_attribute_definitions WHERE key = 'wechat' AND deleted_at IS NULL
);

-- Step 2: Migrate existing wechat values to user_attribute_values
-- Only migrate non-empty values
INSERT INTO user_attribute_values (user_id, attribute_id, value, created_at, updated_at)
SELECT
    u.id,
    (SELECT id FROM user_attribute_definitions WHERE key = 'wechat' AND deleted_at IS NULL LIMIT 1),
    u.wechat,
    NOW(),
    NOW()
FROM users u
WHERE u.wechat IS NOT NULL
  AND u.wechat != ''
  AND u.deleted_at IS NULL
  AND NOT EXISTS (
    SELECT 1 FROM user_attribute_values uav
    WHERE uav.user_id = u.id
      AND uav.attribute_id = (SELECT id FROM user_attribute_definitions WHERE key = 'wechat' AND deleted_at IS NULL LIMIT 1)
  );

-- Step 3: Update display_order to ensure wechat appears first
UPDATE user_attribute_definitions
SET display_order = -1
WHERE key = 'wechat' AND deleted_at IS NULL;

-- Reorder all attributes starting from 0
WITH ordered AS (
    SELECT id, ROW_NUMBER() OVER (ORDER BY display_order, id) - 1 as new_order
    FROM user_attribute_definitions
    WHERE deleted_at IS NULL
)
UPDATE user_attribute_definitions
SET display_order = ordered.new_order
FROM ordered
WHERE user_attribute_definitions.id = ordered.id;

-- Step 4: Drop the redundant wechat column from users table
ALTER TABLE users DROP COLUMN IF EXISTS wechat;

-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin

-- Restore wechat column
ALTER TABLE users ADD COLUMN IF NOT EXISTS wechat VARCHAR(100) DEFAULT '';

-- Copy attribute values back to users.wechat column
UPDATE users u
SET wechat = uav.value
FROM user_attribute_values uav
JOIN user_attribute_definitions uad ON uav.attribute_id = uad.id
WHERE uav.user_id = u.id
  AND uad.key = 'wechat'
  AND uad.deleted_at IS NULL;

-- Delete migrated attribute values
DELETE FROM user_attribute_values
WHERE attribute_id IN (
    SELECT id FROM user_attribute_definitions WHERE key = 'wechat' AND deleted_at IS NULL
);

-- Soft-delete the wechat attribute definition
UPDATE user_attribute_definitions
SET deleted_at = NOW()
WHERE key = 'wechat' AND deleted_at IS NULL;

-- +goose StatementEnd
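-- Illustrative verification query (read-only): count how many wechat values were migrated:
-- SELECT COUNT(*)
-- FROM user_attribute_values uav
-- JOIN user_attribute_definitions uad ON uad.id = uav.attribute_id
-- WHERE uad.key = 'wechat' AND uad.deleted_at IS NULL;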
15
backend/migrations/020_add_temp_unschedulable.sql
Normal file
@@ -0,0 +1,15 @@
-- 020_add_temp_unschedulable.sql
-- Add fields for the temporary-unschedulable feature

-- When the temporary-unschedulable state is lifted
ALTER TABLE accounts ADD COLUMN IF NOT EXISTS temp_unschedulable_until timestamptz;

-- Why the account was marked temporarily unschedulable (for troubleshooting and auditing)
ALTER TABLE accounts ADD COLUMN IF NOT EXISTS temp_unschedulable_reason text;

-- Index to speed up scheduling queries
CREATE INDEX IF NOT EXISTS idx_accounts_temp_unschedulable_until ON accounts(temp_unschedulable_until) WHERE deleted_at IS NULL;

-- Document the columns
COMMENT ON COLUMN accounts.temp_unschedulable_until IS 'Time at which the temporary-unschedulable state is lifted; set when a temporary-unschedulable rule is triggered (based on error codes or error-message keywords)';
COMMENT ON COLUMN accounts.temp_unschedulable_reason IS 'Reason the account was marked temporarily unschedulable (for troubleshooting and auditing)';
30
backend/migrations/024_add_gemini_tier_id.sql
Normal file
@@ -0,0 +1,30 @@
-- +goose Up
-- +goose StatementBegin
-- Add a default tier_id to Gemini Code Assist OAuth accounts.
-- Covers accounts explicitly marked as code_assist, as well as legacy accounts
-- (oauth_type missing but project_id present).
UPDATE accounts
SET credentials = jsonb_set(
    credentials,
    '{tier_id}',
    '"LEGACY"',
    true
)
WHERE platform = 'gemini'
  AND type = 'oauth'
  AND jsonb_typeof(credentials) = 'object'
  AND credentials->>'tier_id' IS NULL
  AND (
    credentials->>'oauth_type' = 'code_assist'
    OR (credentials->>'oauth_type' IS NULL AND credentials->>'project_id' IS NOT NULL)
  );
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
-- Rollback: remove the tier_id field
UPDATE accounts
SET credentials = credentials - 'tier_id'
WHERE platform = 'gemini'
  AND type = 'oauth'
  AND credentials ? 'tier_id';
-- +goose StatementEnd
104
backend/migrations/026_ops_metrics_aggregation_tables.sql
Normal file
@@ -0,0 +1,104 @@
-- Ops monitoring: pre-aggregation tables for dashboard queries
--
-- Problem:
-- The ops dashboard currently runs percentile_cont + GROUP BY queries over large raw tables
-- (usage_logs, ops_error_logs). These will get slower as data grows.
--
-- This migration adds schema-only aggregation tables that can be populated by a future background job.
-- No triggers/functions/jobs are created here (schema only).

-- ============================================
-- Hourly aggregates (per provider/platform)
-- ============================================

CREATE TABLE IF NOT EXISTS ops_metrics_hourly (
    -- Start of the hour bucket (recommended: UTC).
    bucket_start TIMESTAMPTZ NOT NULL,

    -- Provider/platform label (e.g. anthropic/openai/gemini). Mirrors ops_* queries that GROUP BY platform.
    platform VARCHAR(50) NOT NULL,

    -- Traffic counts (use these to compute rates reliably across ranges).
    request_count BIGINT NOT NULL DEFAULT 0,
    success_count BIGINT NOT NULL DEFAULT 0,
    error_count BIGINT NOT NULL DEFAULT 0,

    -- Error breakdown used by provider health UI.
    error_4xx_count BIGINT NOT NULL DEFAULT 0,
    error_5xx_count BIGINT NOT NULL DEFAULT 0,
    timeout_count BIGINT NOT NULL DEFAULT 0,

    -- Latency aggregates (ms).
    avg_latency_ms DOUBLE PRECISION,
    p99_latency_ms DOUBLE PRECISION,

    -- Convenience rate (percentage, 0-100). Still keep counts as source of truth.
    error_rate DOUBLE PRECISION NOT NULL DEFAULT 0,

    -- When this row was last (re)computed by the background job.
    computed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    PRIMARY KEY (bucket_start, platform)
);

CREATE INDEX IF NOT EXISTS idx_ops_metrics_hourly_platform_bucket_start
    ON ops_metrics_hourly (platform, bucket_start DESC);

COMMENT ON TABLE ops_metrics_hourly IS 'Pre-aggregated hourly ops metrics by provider/platform to speed up dashboard queries.';
COMMENT ON COLUMN ops_metrics_hourly.bucket_start IS 'Start timestamp of the hour bucket (recommended UTC).';
COMMENT ON COLUMN ops_metrics_hourly.platform IS 'Provider/platform label (anthropic/openai/gemini, etc).';
COMMENT ON COLUMN ops_metrics_hourly.error_rate IS 'Error rate percentage for the bucket (0-100). Counts remain the source of truth.';
COMMENT ON COLUMN ops_metrics_hourly.computed_at IS 'When the row was last computed/refreshed.';

-- ============================================
-- Daily aggregates (per provider/platform)
-- ============================================

CREATE TABLE IF NOT EXISTS ops_metrics_daily (
    -- Day bucket (recommended: UTC date).
    bucket_date DATE NOT NULL,
    platform VARCHAR(50) NOT NULL,

    request_count BIGINT NOT NULL DEFAULT 0,
    success_count BIGINT NOT NULL DEFAULT 0,
    error_count BIGINT NOT NULL DEFAULT 0,

    error_4xx_count BIGINT NOT NULL DEFAULT 0,
    error_5xx_count BIGINT NOT NULL DEFAULT 0,
    timeout_count BIGINT NOT NULL DEFAULT 0,

    avg_latency_ms DOUBLE PRECISION,
    p99_latency_ms DOUBLE PRECISION,

    error_rate DOUBLE PRECISION NOT NULL DEFAULT 0,
    computed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),

    PRIMARY KEY (bucket_date, platform)
);

CREATE INDEX IF NOT EXISTS idx_ops_metrics_daily_platform_bucket_date
    ON ops_metrics_daily (platform, bucket_date DESC);

COMMENT ON TABLE ops_metrics_daily IS 'Pre-aggregated daily ops metrics by provider/platform for longer-term trends.';
COMMENT ON COLUMN ops_metrics_daily.bucket_date IS 'UTC date of the day bucket (recommended).';

-- ============================================
-- Population strategy (future background job)
-- ============================================
--
-- Suggested approach:
-- 1) Compute hourly buckets from raw logs using UTC time-bucketing, then UPSERT into ops_metrics_hourly.
-- 2) Compute daily buckets either directly from raw logs or by rolling up ops_metrics_hourly.
--
-- Notes:
-- - Ensure the job uses a consistent timezone (recommended: SET TIME ZONE 'UTC') to avoid bucket drift.
-- - Derive the provider/platform similarly to existing dashboard queries:
--     usage_logs:     COALESCE(NULLIF(groups.platform, ''), accounts.platform, '')
--     ops_error_logs: COALESCE(NULLIF(ops_error_logs.platform, ''), groups.platform, accounts.platform, '')
-- - Keep request_count/success_count/error_count as the authoritative values; compute error_rate from counts.
--
-- Example (hourly) shape (pseudo-SQL):
--   INSERT INTO ops_metrics_hourly (...)
--   SELECT date_trunc('hour', created_at) AS bucket_start, platform, ...
--   FROM (/* aggregate usage_logs + ops_error_logs */) s
--   ON CONFLICT (bucket_start, platform) DO UPDATE SET ...;
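-- A slightly more concrete (still illustrative) shape for the hourly job, assuming success rows
-- come from usage_logs and error counts would be merged in from ops_error_logs separately:
-- INSERT INTO ops_metrics_hourly (bucket_start, platform, request_count, success_count, avg_latency_ms, computed_at)
-- SELECT date_trunc('hour', ul.created_at),
--        COALESCE(NULLIF(g.platform, ''), a.platform, ''),
--        COUNT(*), COUNT(*), AVG(ul.duration_ms), NOW()
-- FROM usage_logs ul
-- LEFT JOIN groups g ON g.id = ul.group_id
-- LEFT JOIN accounts a ON a.id = ul.account_id
-- WHERE ul.created_at >= date_trunc('hour', NOW()) - INTERVAL '1 hour'
-- GROUP BY 1, 2
-- ON CONFLICT (bucket_start, platform) DO UPDATE
--   SET request_count = EXCLUDED.request_count,
--       success_count = EXCLUDED.success_count,
--       avg_latency_ms = EXCLUDED.avg_latency_ms,
--       computed_at = NOW();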
58
backend/migrations/027_usage_billing_consistency.sql
Normal file
@@ -0,0 +1,58 @@
-- 027_usage_billing_consistency.sql
-- Ensure usage_logs idempotency (request_id, api_key_id) and add reconciliation infrastructure.

-- -----------------------------------------------------------------------------
-- 1) Normalize legacy request_id values
-- -----------------------------------------------------------------------------
-- Historically request_id may be inserted as empty string. Convert it to NULL so
-- the upcoming unique index does not break on repeated "" values.
UPDATE usage_logs
SET request_id = NULL
WHERE request_id = '';

-- If duplicates already exist for the same (request_id, api_key_id), keep the
-- first row and NULL-out request_id for the rest so the unique index can be
-- created without deleting historical logs.
WITH ranked AS (
    SELECT
        id,
        ROW_NUMBER() OVER (PARTITION BY api_key_id, request_id ORDER BY id) AS rn
    FROM usage_logs
    WHERE request_id IS NOT NULL
)
UPDATE usage_logs ul
SET request_id = NULL
FROM ranked r
WHERE ul.id = r.id
  AND r.rn > 1;

-- -----------------------------------------------------------------------------
-- 2) Idempotency constraint for usage_logs
-- -----------------------------------------------------------------------------
CREATE UNIQUE INDEX IF NOT EXISTS idx_usage_logs_request_id_api_key_unique
    ON usage_logs (request_id, api_key_id);

-- -----------------------------------------------------------------------------
-- 3) Reconciliation infrastructure: billing ledger for usage charges
-- -----------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS billing_usage_entries (
    id BIGSERIAL PRIMARY KEY,
    usage_log_id BIGINT NOT NULL REFERENCES usage_logs(id) ON DELETE CASCADE,
    user_id BIGINT NOT NULL REFERENCES users(id) ON DELETE CASCADE,
    api_key_id BIGINT NOT NULL REFERENCES api_keys(id) ON DELETE CASCADE,
    subscription_id BIGINT REFERENCES user_subscriptions(id) ON DELETE SET NULL,
    billing_type SMALLINT NOT NULL,
    applied BOOLEAN NOT NULL DEFAULT TRUE,
    delta_usd DECIMAL(20, 10) NOT NULL DEFAULT 0,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

CREATE UNIQUE INDEX IF NOT EXISTS billing_usage_entries_usage_log_id_unique
    ON billing_usage_entries (usage_log_id);

CREATE INDEX IF NOT EXISTS idx_billing_usage_entries_user_time
    ON billing_usage_entries (user_id, created_at);

CREATE INDEX IF NOT EXISTS idx_billing_usage_entries_created_at
    ON billing_usage_entries (created_at);
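-- Illustrative idempotent write relying on the new unique index (placeholder values;
-- note that rows with a NULL request_id are never treated as duplicates):
-- INSERT INTO usage_logs (user_id, api_key_id, account_id, request_id, model)
-- VALUES (1, 1, 1, 'req-demo-123', 'some-model')
-- ON CONFLICT (request_id, api_key_id) DO NOTHING;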
7
backend/migrations/028_add_account_notes.sql
Normal file
@@ -0,0 +1,7 @@
-- 028_add_account_notes.sql
-- Add optional admin notes for accounts.

ALTER TABLE accounts
    ADD COLUMN IF NOT EXISTS notes TEXT;

COMMENT ON COLUMN accounts.notes IS 'Admin-only notes for account';
10
backend/migrations/028_add_usage_logs_user_agent.sql
Normal file
@@ -0,0 +1,10 @@
-- Add user_agent column to usage_logs table
-- Records the User-Agent header from API requests for analytics and debugging

ALTER TABLE usage_logs
    ADD COLUMN IF NOT EXISTS user_agent VARCHAR(512);

-- Optional: Add index for user_agent queries (uncomment if needed for analytics)
-- CREATE INDEX IF NOT EXISTS idx_usage_logs_user_agent ON usage_logs(user_agent);

COMMENT ON COLUMN usage_logs.user_agent IS 'User-Agent header from the API request';
10
backend/migrations/028_group_image_pricing.sql
Normal file
@@ -0,0 +1,10 @@
-- Add image-generation pricing configuration for Antigravity groups
-- Supports per-image billing for the gemini-3-pro-image model at 1K/2K/4K resolutions

ALTER TABLE groups ADD COLUMN IF NOT EXISTS image_price_1k DECIMAL(20,8);
ALTER TABLE groups ADD COLUMN IF NOT EXISTS image_price_2k DECIMAL(20,8);
ALTER TABLE groups ADD COLUMN IF NOT EXISTS image_price_4k DECIMAL(20,8);

COMMENT ON COLUMN groups.image_price_1k IS 'Per-image price at 1K resolution (USD); used only by the antigravity platform';
COMMENT ON COLUMN groups.image_price_2k IS 'Per-image price at 2K resolution (USD); used only by the antigravity platform';
COMMENT ON COLUMN groups.image_price_4k IS 'Per-image price at 4K resolution (USD); used only by the antigravity platform';
21
backend/migrations/029_add_group_claude_code_restriction.sql
Normal file
@@ -0,0 +1,21 @@
-- 029_add_group_claude_code_restriction.sql
-- Add group-level Claude Code client restriction

-- claude_code_only: whether only Claude Code clients may use this group
ALTER TABLE groups
    ADD COLUMN IF NOT EXISTS claude_code_only BOOLEAN NOT NULL DEFAULT FALSE;

-- fallback_group_id: group that non-Claude-Code requests fall back to
ALTER TABLE groups
    ADD COLUMN IF NOT EXISTS fallback_group_id BIGINT REFERENCES groups(id) ON DELETE SET NULL;

-- Indexes to speed up lookups
CREATE INDEX IF NOT EXISTS idx_groups_claude_code_only
    ON groups(claude_code_only) WHERE deleted_at IS NULL;

CREATE INDEX IF NOT EXISTS idx_groups_fallback_group_id
    ON groups(fallback_group_id) WHERE deleted_at IS NULL AND fallback_group_id IS NOT NULL;

-- Column comments
COMMENT ON COLUMN groups.claude_code_only IS 'Whether only Claude Code clients may access this group';
COMMENT ON COLUMN groups.fallback_group_id IS 'Group ID used as a fallback for non-Claude-Code requests';
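-- Illustrative application-side lookup (sketch only; the actual routing code may differ):
-- resolve the effective group for a request that does not come from Claude Code:
-- SELECT CASE WHEN g.claude_code_only THEN g.fallback_group_id ELSE g.id END AS effective_group_id
-- FROM groups g WHERE g.id = $1 AND g.deleted_at IS NULL;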
5
backend/migrations/029_usage_log_image_fields.sql
Normal file
@@ -0,0 +1,5 @@
-- Add image-generation statistics fields to usage logs
-- Records usage of image-generation models such as gemini-3-pro-image

ALTER TABLE usage_logs ADD COLUMN IF NOT EXISTS image_count INT DEFAULT 0;
ALTER TABLE usage_logs ADD COLUMN IF NOT EXISTS image_size VARCHAR(10);
10
backend/migrations/030_add_account_expires_at.sql
Normal file
@@ -0,0 +1,10 @@
-- Add expires_at for account expiration configuration
ALTER TABLE accounts ADD COLUMN IF NOT EXISTS expires_at timestamptz;
-- Document expires_at meaning
COMMENT ON COLUMN accounts.expires_at IS 'Account expiration time (NULL means no expiration).';
-- Add auto_pause_on_expired for account expiration scheduling control
ALTER TABLE accounts ADD COLUMN IF NOT EXISTS auto_pause_on_expired boolean NOT NULL DEFAULT true;
-- Document auto_pause_on_expired meaning
COMMENT ON COLUMN accounts.auto_pause_on_expired IS 'Auto pause scheduling when account expires.';
-- Ensure existing accounts are enabled by default
UPDATE accounts SET auto_pause_on_expired = true;
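-- Illustrative scheduling filter using these columns (sketch only; the repository queries may differ):
-- SELECT id FROM accounts
-- WHERE deleted_at IS NULL
--   AND schedulable
--   AND NOT (auto_pause_on_expired AND expires_at IS NOT NULL AND expires_at <= NOW());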
5
backend/migrations/031_add_ip_address.sql
Normal file
@@ -0,0 +1,5 @@
-- Add IP address field to usage_logs table for request tracking (admin-only visibility)
ALTER TABLE usage_logs ADD COLUMN IF NOT EXISTS ip_address VARCHAR(45);

-- Create index for IP address queries
CREATE INDEX IF NOT EXISTS idx_usage_logs_ip_address ON usage_logs(ip_address);
9
backend/migrations/032_add_api_key_ip_restriction.sql
Normal file
@@ -0,0 +1,9 @@
-- Add IP restriction fields to api_keys table
-- ip_whitelist: JSON array of allowed IPs/CIDRs (if set, only these IPs can use the key)
-- ip_blacklist: JSON array of blocked IPs/CIDRs (these IPs are always blocked)

ALTER TABLE api_keys ADD COLUMN IF NOT EXISTS ip_whitelist JSONB DEFAULT NULL;
ALTER TABLE api_keys ADD COLUMN IF NOT EXISTS ip_blacklist JSONB DEFAULT NULL;

COMMENT ON COLUMN api_keys.ip_whitelist IS 'JSON array of allowed IPs/CIDRs, e.g. ["192.168.1.100", "10.0.0.0/8"]';
COMMENT ON COLUMN api_keys.ip_blacklist IS 'JSON array of blocked IPs/CIDRs, e.g. ["1.2.3.4", "5.6.0.0/16"]';
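-- Illustrative exact-match lookup (sketch; CIDR matching is expected to happen in application code):
-- SELECT id FROM api_keys WHERE ip_whitelist ? '192.168.1.100';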
34
backend/migrations/033_add_promo_codes.sql
Normal file
@@ -0,0 +1,34 @@
-- Registration promo code table
CREATE TABLE IF NOT EXISTS promo_codes (
    id BIGSERIAL PRIMARY KEY,
    code VARCHAR(32) NOT NULL UNIQUE,
    bonus_amount DECIMAL(20,8) NOT NULL DEFAULT 0,
    max_uses INT NOT NULL DEFAULT 0,
    used_count INT NOT NULL DEFAULT 0,
    status VARCHAR(20) NOT NULL DEFAULT 'active',
    expires_at TIMESTAMPTZ DEFAULT NULL,
    notes TEXT DEFAULT NULL,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
);

-- Promo code usage record table
CREATE TABLE IF NOT EXISTS promo_code_usages (
    id BIGSERIAL PRIMARY KEY,
    promo_code_id BIGINT NOT NULL REFERENCES promo_codes(id) ON DELETE CASCADE,
    user_id BIGINT NOT NULL REFERENCES users(id) ON DELETE CASCADE,
    bonus_amount DECIMAL(20,8) NOT NULL,
    used_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    UNIQUE(promo_code_id, user_id)
);

-- Indexes
CREATE INDEX IF NOT EXISTS idx_promo_codes_status ON promo_codes(status);
CREATE INDEX IF NOT EXISTS idx_promo_codes_expires_at ON promo_codes(expires_at);
CREATE INDEX IF NOT EXISTS idx_promo_code_usages_promo_code_id ON promo_code_usages(promo_code_id);
CREATE INDEX IF NOT EXISTS idx_promo_code_usages_user_id ON promo_code_usages(user_id);

COMMENT ON TABLE promo_codes IS 'Registration promo codes';
COMMENT ON TABLE promo_code_usages IS 'Promo code usage records';
COMMENT ON COLUMN promo_codes.max_uses IS 'Maximum number of uses; 0 means unlimited';
COMMENT ON COLUMN promo_codes.status IS 'Status: active, disabled';
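-- Illustrative redemption update (sketch; the application is expected to run this in a
-- transaction together with the matching promo_code_usages insert):
-- UPDATE promo_codes
-- SET used_count = used_count + 1, updated_at = NOW()
-- WHERE code = 'WELCOME2024' AND status = 'active'
--   AND (max_uses = 0 OR used_count < max_uses)
--   AND (expires_at IS NULL OR expires_at > NOW());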
717
backend/migrations/033_ops_monitoring_vnext.sql
Normal file
717
backend/migrations/033_ops_monitoring_vnext.sql
Normal file
@@ -0,0 +1,717 @@
|
||||
-- Ops Monitoring (vNext): squashed migration (030)
|
||||
--
|
||||
-- This repository originally planned Ops vNext as migrations 030-036:
|
||||
-- 030 drop legacy ops tables
|
||||
-- 031 core schema
|
||||
-- 032 pre-aggregation tables
|
||||
-- 033 indexes + optional extensions
|
||||
-- 034 add avg/max to preagg
|
||||
-- 035 add notify_email to alert rules
|
||||
-- 036 seed default alert rules
|
||||
--
|
||||
-- Since these migrations have NOT been applied to any environment yet, we squash them
|
||||
-- into a single 030 migration for easier review and a cleaner migration history.
|
||||
--
|
||||
-- Notes:
|
||||
-- - This is intentionally destructive for ops_* data (error logs / metrics / alerts).
|
||||
-- - It is idempotent (DROP/CREATE/ALTER IF EXISTS/IF NOT EXISTS), but will wipe ops_* data if re-run.
|
||||
|
||||
-- =====================================================================
|
||||
-- 030_ops_drop_legacy_ops_tables.sql
|
||||
-- =====================================================================
|
||||
|
||||
SET LOCAL lock_timeout = '5s';
|
||||
SET LOCAL statement_timeout = '10min';
|
||||
|
||||
-- Legacy pre-aggregation tables (from 026 and/or previous branches)
|
||||
DROP TABLE IF EXISTS ops_metrics_daily CASCADE;
|
||||
DROP TABLE IF EXISTS ops_metrics_hourly CASCADE;
|
||||
|
||||
-- Core ops tables that may exist in some deployments / branches
|
||||
DROP TABLE IF EXISTS ops_system_metrics CASCADE;
|
||||
DROP TABLE IF EXISTS ops_error_logs CASCADE;
|
||||
DROP TABLE IF EXISTS ops_alert_events CASCADE;
|
||||
DROP TABLE IF EXISTS ops_alert_rules CASCADE;
|
||||
DROP TABLE IF EXISTS ops_job_heartbeats CASCADE;
|
||||
DROP TABLE IF EXISTS ops_retry_attempts CASCADE;
|
||||
|
||||
-- Optional legacy tables (best-effort cleanup)
|
||||
DROP TABLE IF EXISTS ops_scheduled_reports CASCADE;
|
||||
DROP TABLE IF EXISTS ops_group_availability_configs CASCADE;
|
||||
DROP TABLE IF EXISTS ops_group_availability_events CASCADE;
|
||||
|
||||
-- Optional legacy views/indexes
|
||||
DROP VIEW IF EXISTS ops_latest_metrics CASCADE;
|
||||
|
||||
-- =====================================================================
|
||||
-- 031_ops_core_schema.sql
|
||||
-- =====================================================================
|
||||
|
||||
-- Ops Monitoring (vNext): core schema (errors / retries / metrics / jobs / alerts)
|
||||
--
|
||||
-- Design goals:
|
||||
-- - Support global filtering (time/platform/group) across all ops modules.
|
||||
-- - Persist enough context for two retry modes (client retry / pinned upstream retry).
|
||||
-- - Make ops background jobs observable via job heartbeats.
|
||||
-- - Keep schema stable and indexes targeted (high-write tables).
|
||||
--
|
||||
-- Notes:
|
||||
-- - This migration is idempotent.
|
||||
-- - ops_* tables intentionally avoid strict foreign keys to reduce write amplification/locks.
|
||||
|
||||
SET LOCAL lock_timeout = '5s';
|
||||
SET LOCAL statement_timeout = '10min';
|
||||
|
||||
-- ============================================
|
||||
-- 1) ops_error_logs: error log details (high-write)
|
||||
-- ============================================
|
||||
|
||||
CREATE TABLE IF NOT EXISTS ops_error_logs (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
|
||||
-- Correlation / identities
|
||||
request_id VARCHAR(64),
|
||||
client_request_id VARCHAR(64),
|
||||
user_id BIGINT,
|
||||
api_key_id BIGINT,
|
||||
account_id BIGINT,
|
||||
group_id BIGINT,
|
||||
client_ip inet,
|
||||
|
||||
-- Dimensions for global filtering
|
||||
platform VARCHAR(32),
|
||||
|
||||
-- Request metadata
|
||||
model VARCHAR(100),
|
||||
request_path VARCHAR(256),
|
||||
stream BOOLEAN NOT NULL DEFAULT false,
|
||||
user_agent TEXT,
|
||||
|
||||
-- Core error classification
|
||||
error_phase VARCHAR(32) NOT NULL,
|
||||
error_type VARCHAR(64) NOT NULL,
|
||||
severity VARCHAR(8) NOT NULL DEFAULT 'P2',
|
||||
status_code INT,
|
||||
|
||||
-- vNext metric semantics
|
||||
is_business_limited BOOLEAN NOT NULL DEFAULT false,
|
||||
|
||||
-- Error details (sanitized/truncated at ingest time)
|
||||
error_message TEXT,
|
||||
error_body TEXT,
|
||||
|
||||
-- Provider/upstream details (optional; useful for trends & account health)
|
||||
error_source VARCHAR(64),
|
||||
error_owner VARCHAR(32),
|
||||
account_status VARCHAR(50),
|
||||
upstream_status_code INT,
|
||||
upstream_error_message TEXT,
|
||||
upstream_error_detail TEXT,
|
||||
provider_error_code VARCHAR(64),
|
||||
provider_error_type VARCHAR(64),
|
||||
network_error_type VARCHAR(50),
|
||||
retry_after_seconds INT,
|
||||
|
||||
-- Timings (ms) - optional
|
||||
duration_ms INT,
|
||||
time_to_first_token_ms BIGINT,
|
||||
auth_latency_ms BIGINT,
|
||||
routing_latency_ms BIGINT,
|
||||
upstream_latency_ms BIGINT,
|
||||
response_latency_ms BIGINT,
|
||||
|
||||
-- Retry context (only stored for error requests)
|
||||
request_body JSONB,
|
||||
request_headers JSONB,
|
||||
request_body_truncated BOOLEAN NOT NULL DEFAULT false,
|
||||
request_body_bytes INT,
|
||||
|
||||
-- Retryability flags (best-effort classification)
|
||||
is_retryable BOOLEAN NOT NULL DEFAULT false,
|
||||
retry_count INT NOT NULL DEFAULT 0,
|
||||
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
COMMENT ON TABLE ops_error_logs IS 'Ops error logs (vNext). Stores sanitized error details and request_body for retries (errors only).';
|
||||
|
||||
-- ============================================
|
||||
-- 2) ops_retry_attempts: audit log for retries
|
||||
-- ============================================
|
||||
|
||||
CREATE TABLE IF NOT EXISTS ops_retry_attempts (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
|
||||
requested_by_user_id BIGINT,
|
||||
source_error_id BIGINT,
|
||||
|
||||
-- client|upstream
|
||||
mode VARCHAR(16) NOT NULL,
|
||||
pinned_account_id BIGINT,
|
||||
|
||||
-- queued|running|succeeded|failed
|
||||
status VARCHAR(16) NOT NULL DEFAULT 'queued',
|
||||
started_at TIMESTAMPTZ,
|
||||
finished_at TIMESTAMPTZ,
|
||||
duration_ms BIGINT,
|
||||
|
||||
-- Optional result correlation
|
||||
result_request_id VARCHAR(64),
|
||||
result_error_id BIGINT,
|
||||
result_usage_request_id VARCHAR(64),
|
||||
|
||||
error_message TEXT
|
||||
);
|
||||
|
||||
COMMENT ON TABLE ops_retry_attempts IS 'Audit table for ops retries (client retry / pinned upstream retry).';
|
||||
|
||||
-- ============================================
|
||||
-- 3) ops_system_metrics: system + request window snapshots
|
||||
-- ============================================
|
||||
|
||||
CREATE TABLE IF NOT EXISTS ops_system_metrics (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
window_minutes INT NOT NULL DEFAULT 1,
|
||||
|
||||
-- Optional dimensions (only if collector chooses to write per-dimension snapshots)
|
||||
platform VARCHAR(32),
|
||||
group_id BIGINT,
|
||||
|
||||
-- Core counts
|
||||
success_count BIGINT NOT NULL DEFAULT 0,
|
||||
error_count_total BIGINT NOT NULL DEFAULT 0,
|
||||
business_limited_count BIGINT NOT NULL DEFAULT 0,
|
||||
error_count_sla BIGINT NOT NULL DEFAULT 0,
|
||||
|
||||
upstream_error_count_excl_429_529 BIGINT NOT NULL DEFAULT 0,
|
||||
upstream_429_count BIGINT NOT NULL DEFAULT 0,
|
||||
upstream_529_count BIGINT NOT NULL DEFAULT 0,
|
||||
|
||||
token_consumed BIGINT NOT NULL DEFAULT 0,
|
||||
|
||||
-- Rates
|
||||
qps DOUBLE PRECISION,
|
||||
tps DOUBLE PRECISION,
|
||||
|
||||
-- Duration percentiles (ms) - success requests
|
||||
duration_p50_ms INT,
|
||||
duration_p90_ms INT,
|
||||
duration_p95_ms INT,
|
||||
duration_p99_ms INT,
|
||||
duration_avg_ms DOUBLE PRECISION,
|
||||
duration_max_ms INT,
|
||||
|
||||
-- TTFT percentiles (ms) - success requests (streaming)
|
||||
ttft_p50_ms INT,
|
||||
ttft_p90_ms INT,
|
||||
ttft_p95_ms INT,
|
||||
ttft_p99_ms INT,
|
||||
ttft_avg_ms DOUBLE PRECISION,
|
||||
ttft_max_ms INT,
|
||||
|
||||
-- System resources
|
||||
cpu_usage_percent DOUBLE PRECISION,
|
||||
memory_used_mb BIGINT,
|
||||
memory_total_mb BIGINT,
|
||||
memory_usage_percent DOUBLE PRECISION,
|
||||
|
||||
-- Dependency health (best-effort)
|
||||
db_ok BOOLEAN,
|
||||
redis_ok BOOLEAN,
|
||||
|
||||
-- DB pool & runtime
|
||||
db_conn_active INT,
|
||||
db_conn_idle INT,
|
||||
db_conn_waiting INT,
|
||||
goroutine_count INT,
|
||||
|
||||
-- Queue / concurrency
|
||||
concurrency_queue_depth INT
|
||||
);
|
||||
|
||||
COMMENT ON TABLE ops_system_metrics IS 'Ops system/request metrics snapshots (vNext). Used for dashboard overview and realtime rates.';
|
||||
|
||||
-- ============================================
|
||||
-- 4) ops_job_heartbeats: background jobs health
|
||||
-- ============================================
|
||||
|
||||
CREATE TABLE IF NOT EXISTS ops_job_heartbeats (
|
||||
job_name VARCHAR(64) PRIMARY KEY,
|
||||
|
||||
last_run_at TIMESTAMPTZ,
|
||||
last_success_at TIMESTAMPTZ,
|
||||
last_error_at TIMESTAMPTZ,
|
||||
last_error TEXT,
|
||||
last_duration_ms BIGINT,
|
||||
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
COMMENT ON TABLE ops_job_heartbeats IS 'Ops background jobs heartbeats (vNext).';
|
||||
|
||||
-- ============================================
|
||||
-- 5) ops_alert_rules / ops_alert_events
|
||||
-- ============================================
|
||||
|
||||
CREATE TABLE IF NOT EXISTS ops_alert_rules (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
|
||||
name VARCHAR(128) NOT NULL,
|
||||
description TEXT,
|
||||
enabled BOOLEAN NOT NULL DEFAULT true,
|
||||
|
||||
severity VARCHAR(16) NOT NULL DEFAULT 'warning',
|
||||
|
||||
-- Metric definition
|
||||
metric_type VARCHAR(64) NOT NULL,
|
||||
operator VARCHAR(8) NOT NULL,
|
||||
threshold DOUBLE PRECISION NOT NULL,
|
||||
|
||||
window_minutes INT NOT NULL DEFAULT 5,
|
||||
sustained_minutes INT NOT NULL DEFAULT 5,
|
||||
cooldown_minutes INT NOT NULL DEFAULT 10,
|
||||
|
||||
-- Optional scoping: platform/group filters etc.
|
||||
filters JSONB,
|
||||
|
||||
last_triggered_at TIMESTAMPTZ,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS idx_ops_alert_rules_name_unique
|
||||
ON ops_alert_rules (name);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_ops_alert_rules_enabled
|
||||
ON ops_alert_rules (enabled);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS ops_alert_events (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
|
||||
rule_id BIGINT,
|
||||
severity VARCHAR(16) NOT NULL,
|
||||
status VARCHAR(16) NOT NULL DEFAULT 'firing',
|
||||
|
||||
title VARCHAR(200),
|
||||
description TEXT,
|
||||
|
||||
metric_value DOUBLE PRECISION,
|
||||
threshold_value DOUBLE PRECISION,
|
||||
dimensions JSONB,
|
||||
|
||||
fired_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
resolved_at TIMESTAMPTZ,
|
||||
|
||||
email_sent BOOLEAN NOT NULL DEFAULT false,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_ops_alert_events_rule_status
|
||||
ON ops_alert_events (rule_id, status);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_ops_alert_events_fired_at
|
||||
ON ops_alert_events (fired_at DESC);
|
||||
|
||||
-- =====================================================================
|
||||
-- 032_ops_preaggregation_tables.sql
|
||||
-- =====================================================================
|
||||
|
||||
-- Ops Monitoring (vNext): pre-aggregation tables
|
||||
--
|
||||
-- Purpose:
|
||||
-- - Provide stable query performance for 1–24h windows (and beyond), avoiding expensive
|
||||
-- percentile_cont scans on raw logs for every dashboard refresh.
|
||||
-- - Support global filter dimensions: overall / platform / group.
|
||||
--
|
||||
-- Design note:
|
||||
-- - We keep a single table with nullable platform/group_id, and enforce uniqueness via a
|
||||
-- COALESCE-based unique index (because UNIQUE with NULLs allows duplicates in Postgres).
|
||||
|
||||
SET LOCAL lock_timeout = '5s';
|
||||
SET LOCAL statement_timeout = '10min';
|
||||
|
||||
-- ============================================
|
||||
-- 1) ops_metrics_hourly
|
||||
-- ============================================
|
||||
|
||||
CREATE TABLE IF NOT EXISTS ops_metrics_hourly (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
|
||||
bucket_start TIMESTAMPTZ NOT NULL,
|
||||
platform VARCHAR(32),
|
||||
group_id BIGINT,
|
||||
|
||||
success_count BIGINT NOT NULL DEFAULT 0,
|
||||
error_count_total BIGINT NOT NULL DEFAULT 0,
|
||||
business_limited_count BIGINT NOT NULL DEFAULT 0,
|
||||
error_count_sla BIGINT NOT NULL DEFAULT 0,
|
||||
|
||||
upstream_error_count_excl_429_529 BIGINT NOT NULL DEFAULT 0,
|
||||
upstream_429_count BIGINT NOT NULL DEFAULT 0,
|
||||
upstream_529_count BIGINT NOT NULL DEFAULT 0,
|
||||
|
||||
token_consumed BIGINT NOT NULL DEFAULT 0,
|
||||
|
||||
-- Duration percentiles (ms)
|
||||
duration_p50_ms INT,
|
||||
duration_p90_ms INT,
|
||||
duration_p95_ms INT,
|
||||
duration_p99_ms INT,
|
||||
|
||||
-- TTFT percentiles (ms)
|
||||
ttft_p50_ms INT,
|
||||
ttft_p90_ms INT,
|
||||
ttft_p95_ms INT,
|
||||
ttft_p99_ms INT,
|
||||
|
||||
computed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
-- Uniqueness across three “dimension modes” (overall / platform / group).
|
||||
-- Postgres UNIQUE treats NULLs as distinct, so we enforce uniqueness via COALESCE.
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS idx_ops_metrics_hourly_unique_dim
|
||||
ON ops_metrics_hourly (
|
||||
bucket_start,
|
||||
COALESCE(platform, ''),
|
||||
COALESCE(group_id, 0)
|
||||
);
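-- A minimal illustrative sketch (not executed by this migration): because uniqueness is
-- enforced through the COALESCE expressions above, an aggregation job that upserts hourly
-- rows would repeat the same expressions in its conflict target, e.g.
--
--   INSERT INTO ops_metrics_hourly (bucket_start, platform, group_id, success_count)
--   VALUES (date_trunc('hour', NOW()), 'anthropic', NULL, 10)
--   ON CONFLICT (bucket_start, COALESCE(platform, ''), COALESCE(group_id, 0))
--   DO UPDATE SET success_count = ops_metrics_hourly.success_count + EXCLUDED.success_count,
--                 computed_at   = NOW();
--
-- (Values here are placeholders; the real writer lives in application code.)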
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_ops_metrics_hourly_bucket
|
||||
ON ops_metrics_hourly (bucket_start DESC);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_ops_metrics_hourly_platform_bucket
|
||||
ON ops_metrics_hourly (platform, bucket_start DESC)
|
||||
WHERE platform IS NOT NULL AND platform <> '' AND group_id IS NULL;
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_ops_metrics_hourly_group_bucket
|
||||
ON ops_metrics_hourly (group_id, bucket_start DESC)
|
||||
WHERE group_id IS NOT NULL AND group_id <> 0;
|
||||
|
||||
COMMENT ON TABLE ops_metrics_hourly IS 'vNext hourly pre-aggregated ops metrics (overall/platform/group).';
|
||||
|
||||
-- ============================================
|
||||
-- 2) ops_metrics_daily (optional; for longer windows)
|
||||
-- ============================================
|
||||
|
||||
CREATE TABLE IF NOT EXISTS ops_metrics_daily (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
|
||||
bucket_date DATE NOT NULL,
|
||||
platform VARCHAR(32),
|
||||
group_id BIGINT,
|
||||
|
||||
success_count BIGINT NOT NULL DEFAULT 0,
|
||||
error_count_total BIGINT NOT NULL DEFAULT 0,
|
||||
business_limited_count BIGINT NOT NULL DEFAULT 0,
|
||||
error_count_sla BIGINT NOT NULL DEFAULT 0,
|
||||
|
||||
upstream_error_count_excl_429_529 BIGINT NOT NULL DEFAULT 0,
|
||||
upstream_429_count BIGINT NOT NULL DEFAULT 0,
|
||||
upstream_529_count BIGINT NOT NULL DEFAULT 0,
|
||||
|
||||
token_consumed BIGINT NOT NULL DEFAULT 0,
|
||||
|
||||
duration_p50_ms INT,
|
||||
duration_p90_ms INT,
|
||||
duration_p95_ms INT,
|
||||
duration_p99_ms INT,
|
||||
|
||||
ttft_p50_ms INT,
|
||||
ttft_p90_ms INT,
|
||||
ttft_p95_ms INT,
|
||||
ttft_p99_ms INT,
|
||||
|
||||
computed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS idx_ops_metrics_daily_unique_dim
|
||||
ON ops_metrics_daily (
|
||||
bucket_date,
|
||||
COALESCE(platform, ''),
|
||||
COALESCE(group_id, 0)
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_ops_metrics_daily_bucket
|
||||
ON ops_metrics_daily (bucket_date DESC);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_ops_metrics_daily_platform_bucket
|
||||
ON ops_metrics_daily (platform, bucket_date DESC)
|
||||
WHERE platform IS NOT NULL AND platform <> '' AND group_id IS NULL;
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_ops_metrics_daily_group_bucket
|
||||
ON ops_metrics_daily (group_id, bucket_date DESC)
|
||||
WHERE group_id IS NOT NULL AND group_id <> 0;
|
||||
|
||||
COMMENT ON TABLE ops_metrics_daily IS 'vNext daily pre-aggregated ops metrics (overall/platform/group).';
|
||||
|
||||
-- =====================================================================
|
||||
-- 033_ops_indexes_and_extensions.sql
|
||||
-- =====================================================================
|
||||
|
||||
-- Ops Monitoring (vNext): indexes and optional extensions
|
||||
--
|
||||
-- This migration intentionally keeps "optional" objects (like pg_trgm) best-effort,
|
||||
-- so environments without extension privileges won't fail the whole migration chain.
|
||||
|
||||
SET LOCAL lock_timeout = '5s';
|
||||
SET LOCAL statement_timeout = '10min';
|
||||
|
||||
-- ============================================
|
||||
-- 1) Core btree indexes (always safe)
|
||||
-- ============================================
|
||||
|
||||
-- ops_error_logs
|
||||
CREATE INDEX IF NOT EXISTS idx_ops_error_logs_created_at
|
||||
ON ops_error_logs (created_at DESC);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_ops_error_logs_platform_time
|
||||
ON ops_error_logs (platform, created_at DESC);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_ops_error_logs_group_time
|
||||
ON ops_error_logs (group_id, created_at DESC)
|
||||
WHERE group_id IS NOT NULL;
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_ops_error_logs_account_time
|
||||
ON ops_error_logs (account_id, created_at DESC)
|
||||
WHERE account_id IS NOT NULL;
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_ops_error_logs_status_time
|
||||
ON ops_error_logs (status_code, created_at DESC);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_ops_error_logs_phase_time
|
||||
ON ops_error_logs (error_phase, created_at DESC);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_ops_error_logs_type_time
|
||||
ON ops_error_logs (error_type, created_at DESC);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_ops_error_logs_request_id
|
||||
ON ops_error_logs (request_id);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_ops_error_logs_client_request_id
|
||||
ON ops_error_logs (client_request_id);
|
||||
|
||||
-- ops_system_metrics
|
||||
CREATE INDEX IF NOT EXISTS idx_ops_system_metrics_created_at
|
||||
ON ops_system_metrics (created_at DESC);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_ops_system_metrics_window_time
|
||||
ON ops_system_metrics (window_minutes, created_at DESC);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_ops_system_metrics_platform_time
|
||||
ON ops_system_metrics (platform, created_at DESC)
|
||||
WHERE platform IS NOT NULL AND platform <> '' AND group_id IS NULL;
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_ops_system_metrics_group_time
|
||||
ON ops_system_metrics (group_id, created_at DESC)
|
||||
WHERE group_id IS NOT NULL;
|
||||
|
||||
-- ops_retry_attempts
|
||||
CREATE INDEX IF NOT EXISTS idx_ops_retry_attempts_created_at
|
||||
ON ops_retry_attempts (created_at DESC);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_ops_retry_attempts_source_error
|
||||
ON ops_retry_attempts (source_error_id, created_at DESC)
|
||||
WHERE source_error_id IS NOT NULL;
|
||||
|
||||
-- Prevent concurrent retries for the same ops_error_logs row (race-free, multi-instance safe).
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS idx_ops_retry_attempts_unique_active
|
||||
ON ops_retry_attempts (source_error_id)
|
||||
WHERE source_error_id IS NOT NULL AND status IN ('queued', 'running');
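-- Illustrative only (not executed by this migration): with the partial unique index above,
-- enqueueing a retry can rely on the database to reject a second in-flight attempt, e.g.
--
--   INSERT INTO ops_retry_attempts (source_error_id, mode, status)
--   VALUES (42, 'client', 'queued');
--
-- A concurrent identical insert while the first attempt is still 'queued'/'running' fails
-- with a unique_violation, which callers can treat as "retry already in progress".
-- (The id 42 is a placeholder.)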
|
||||
|
||||
-- ============================================
|
||||
-- 2) Optional: pg_trgm + trigram indexes for fuzzy search
|
||||
-- ============================================
|
||||
|
||||
DO $$
|
||||
BEGIN
|
||||
BEGIN
|
||||
CREATE EXTENSION IF NOT EXISTS pg_trgm;
|
||||
EXCEPTION WHEN OTHERS THEN
|
||||
-- Missing privileges or extension package should not block migrations.
|
||||
RAISE NOTICE 'pg_trgm extension not created: %', SQLERRM;
|
||||
END;
|
||||
|
||||
IF EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'pg_trgm') THEN
|
||||
-- request_id / client_request_id fuzzy search
|
||||
EXECUTE 'CREATE INDEX IF NOT EXISTS idx_ops_error_logs_request_id_trgm
|
||||
ON ops_error_logs USING gin (request_id gin_trgm_ops)';
|
||||
EXECUTE 'CREATE INDEX IF NOT EXISTS idx_ops_error_logs_client_request_id_trgm
|
||||
ON ops_error_logs USING gin (client_request_id gin_trgm_ops)';
|
||||
|
||||
-- error_message fuzzy search
|
||||
EXECUTE 'CREATE INDEX IF NOT EXISTS idx_ops_error_logs_error_message_trgm
|
||||
ON ops_error_logs USING gin (error_message gin_trgm_ops)';
|
||||
END IF;
|
||||
END $$;
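-- Illustrative only: when pg_trgm is installed, fuzzy lookups with leading wildcards, e.g.
--
--   SELECT id, created_at, error_type
--   FROM ops_error_logs
--   WHERE error_message ILIKE '%rate limit%';
--
-- can be served by idx_ops_error_logs_error_message_trgm instead of a sequential scan.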
|
||||
|
||||
-- =====================================================================
|
||||
-- 034_ops_preaggregation_add_avg_max.sql
|
||||
-- =====================================================================
|
||||
|
||||
-- Ops Monitoring (vNext): extend pre-aggregation tables with avg/max latency fields
|
||||
--
|
||||
-- Why:
|
||||
-- - The dashboard overview returns avg/max for duration/TTFT.
|
||||
-- - Hourly/daily pre-aggregation tables originally stored only p50/p90/p95/p99, which makes
|
||||
-- it impossible to answer avg/max in preagg mode without falling back to raw scans.
|
||||
--
|
||||
-- This migration is idempotent and safe to run multiple times.
|
||||
--
|
||||
-- NOTE: We keep the existing p50/p90/p95/p99 columns as-is; these are still used for
|
||||
-- approximate long-window summaries.
|
||||
|
||||
SET LOCAL lock_timeout = '5s';
|
||||
SET LOCAL statement_timeout = '10min';
|
||||
|
||||
-- Hourly table
|
||||
ALTER TABLE ops_metrics_hourly
|
||||
ADD COLUMN IF NOT EXISTS duration_avg_ms DOUBLE PRECISION,
|
||||
ADD COLUMN IF NOT EXISTS duration_max_ms INT,
|
||||
ADD COLUMN IF NOT EXISTS ttft_avg_ms DOUBLE PRECISION,
|
||||
ADD COLUMN IF NOT EXISTS ttft_max_ms INT;
|
||||
|
||||
-- Daily table
|
||||
ALTER TABLE ops_metrics_daily
|
||||
ADD COLUMN IF NOT EXISTS duration_avg_ms DOUBLE PRECISION,
|
||||
ADD COLUMN IF NOT EXISTS duration_max_ms INT,
|
||||
ADD COLUMN IF NOT EXISTS ttft_avg_ms DOUBLE PRECISION,
|
||||
ADD COLUMN IF NOT EXISTS ttft_max_ms INT;
|
||||
|
||||
-- =====================================================================
|
||||
-- 035_ops_alert_rules_notify_email.sql
|
||||
-- =====================================================================
|
||||
|
||||
-- Ops Monitoring (vNext): alert rule notify settings
|
||||
--
|
||||
-- Adds notify_email flag to ops_alert_rules to keep UI parity with the backup Ops dashboard.
|
||||
-- Migration is idempotent.
|
||||
|
||||
SET LOCAL lock_timeout = '5s';
|
||||
SET LOCAL statement_timeout = '10min';
|
||||
|
||||
ALTER TABLE ops_alert_rules
|
||||
ADD COLUMN IF NOT EXISTS notify_email BOOLEAN NOT NULL DEFAULT true;
|
||||
|
||||
-- =====================================================================
|
||||
-- 036_ops_seed_default_alert_rules.sql
|
||||
-- =====================================================================
|
||||
|
||||
-- Ops Monitoring (vNext): seed default alert rules (idempotent)
|
||||
--
|
||||
-- Goal:
|
||||
-- - Provide "out of the box" alert rules so the Ops dashboard can immediately show alert events.
|
||||
-- - Keep inserts idempotent via ON CONFLICT (name) DO NOTHING.
|
||||
--
|
||||
-- Notes:
|
||||
-- - Thresholds are intentionally conservative defaults and should be tuned per deployment.
|
||||
-- - Metric semantics follow vNext:
|
||||
-- - success_rate / error_rate are based on SLA-scope counts (exclude is_business_limited).
|
||||
-- - upstream_error_rate excludes 429/529.
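-- Illustrative only: under these semantics, an SLA-scope error rate over a recent window
-- could be derived from ops_system_metrics roughly as
--
--   SELECT 100.0 * SUM(error_count_sla)
--          / NULLIF(SUM(success_count) + SUM(error_count_sla), 0) AS error_rate_pct
--   FROM ops_system_metrics
--   WHERE created_at > NOW() - INTERVAL '5 minutes';
--
-- (The exact evaluator query lives in application code; this is only a sketch.)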
|
||||
|
||||
SET LOCAL lock_timeout = '5s';
|
||||
SET LOCAL statement_timeout = '10min';
|
||||
|
||||
-- 1) High error rate (P1)
|
||||
INSERT INTO ops_alert_rules (
|
||||
name, description, enabled, metric_type, operator, threshold,
|
||||
window_minutes, sustained_minutes, severity, notify_email, cooldown_minutes,
|
||||
created_at, updated_at
|
||||
) VALUES (
|
||||
'错误率过高',
|
||||
'当错误率超过 5% 且持续 5 分钟时触发告警',
|
||||
true, 'error_rate', '>', 5.0, 5, 5, 'P1', true, 20, NOW(), NOW()
|
||||
) ON CONFLICT (name) DO NOTHING;
|
||||
|
||||
-- 2) Low success rate (P0)
|
||||
INSERT INTO ops_alert_rules (
|
||||
name, description, enabled, metric_type, operator, threshold,
|
||||
window_minutes, sustained_minutes, severity, notify_email, cooldown_minutes,
|
||||
created_at, updated_at
|
||||
) VALUES (
|
||||
'成功率过低',
|
||||
'当成功率低于 95% 且持续 5 分钟时触发告警(服务可用性下降)',
|
||||
true, 'success_rate', '<', 95.0, 5, 5, 'P0', true, 15, NOW(), NOW()
|
||||
) ON CONFLICT (name) DO NOTHING;
|
||||
|
||||
-- 3) P99 latency too high (P2)
|
||||
INSERT INTO ops_alert_rules (
|
||||
name, description, enabled, metric_type, operator, threshold,
|
||||
window_minutes, sustained_minutes, severity, notify_email, cooldown_minutes,
|
||||
created_at, updated_at
|
||||
) VALUES (
|
||||
'P99延迟过高',
|
||||
'当 P99 延迟超过 3000ms 且持续 10 分钟时触发告警',
|
||||
true, 'p99_latency_ms', '>', 3000.0, 5, 10, 'P2', true, 30, NOW(), NOW()
|
||||
) ON CONFLICT (name) DO NOTHING;
|
||||
|
||||
-- 4) P95 latency too high (P2)
|
||||
INSERT INTO ops_alert_rules (
|
||||
name, description, enabled, metric_type, operator, threshold,
|
||||
window_minutes, sustained_minutes, severity, notify_email, cooldown_minutes,
|
||||
created_at, updated_at
|
||||
) VALUES (
|
||||
'P95延迟过高',
|
||||
'当 P95 延迟超过 2000ms 且持续 10 分钟时触发告警',
|
||||
true, 'p95_latency_ms', '>', 2000.0, 5, 10, 'P2', true, 30, NOW(), NOW()
|
||||
) ON CONFLICT (name) DO NOTHING;
|
||||
|
||||
-- 5) CPU usage too high (P2)
|
||||
INSERT INTO ops_alert_rules (
|
||||
name, description, enabled, metric_type, operator, threshold,
|
||||
window_minutes, sustained_minutes, severity, notify_email, cooldown_minutes,
|
||||
created_at, updated_at
|
||||
) VALUES (
|
||||
'CPU使用率过高',
|
||||
'当 CPU 使用率超过 85% 且持续 10 分钟时触发告警',
|
||||
true, 'cpu_usage_percent', '>', 85.0, 5, 10, 'P2', true, 30, NOW(), NOW()
|
||||
) ON CONFLICT (name) DO NOTHING;
|
||||
|
||||
-- 6) Memory usage too high (P1)
|
||||
INSERT INTO ops_alert_rules (
|
||||
name, description, enabled, metric_type, operator, threshold,
|
||||
window_minutes, sustained_minutes, severity, notify_email, cooldown_minutes,
|
||||
created_at, updated_at
|
||||
) VALUES (
|
||||
'内存使用率过高',
|
||||
'当内存使用率超过 90% 且持续 10 分钟时触发告警(可能导致 OOM)',
|
||||
true, 'memory_usage_percent', '>', 90.0, 5, 10, 'P1', true, 20, NOW(), NOW()
|
||||
) ON CONFLICT (name) DO NOTHING;
|
||||
|
||||
-- 7) Concurrency queue buildup (P1)
|
||||
INSERT INTO ops_alert_rules (
|
||||
name, description, enabled, metric_type, operator, threshold,
|
||||
window_minutes, sustained_minutes, severity, notify_email, cooldown_minutes,
|
||||
created_at, updated_at
|
||||
) VALUES (
|
||||
'并发队列积压',
|
||||
'当并发队列深度超过 100 且持续 5 分钟时触发告警(系统处理能力不足)',
|
||||
true, 'concurrency_queue_depth', '>', 100.0, 5, 5, 'P1', true, 20, NOW(), NOW()
|
||||
) ON CONFLICT (name) DO NOTHING;
|
||||
|
||||
-- 8) Extremely high error rate (P0)
|
||||
INSERT INTO ops_alert_rules (
|
||||
name, description, enabled, metric_type, operator, threshold,
|
||||
window_minutes, sustained_minutes, severity, notify_email, cooldown_minutes,
|
||||
created_at, updated_at
|
||||
) VALUES (
|
||||
'错误率极高',
|
||||
'当错误率超过 20% 且持续 1 分钟时触发告警(服务严重异常)',
|
||||
true, 'error_rate', '>', 20.0, 1, 1, 'P0', true, 15, NOW(), NOW()
|
||||
) ON CONFLICT (name) DO NOTHING;
|
||||
|
||||
-- Ops Monitoring vNext: add Redis pool stats fields to system metrics snapshots.
|
||||
-- This migration is intentionally idempotent.
|
||||
|
||||
ALTER TABLE ops_system_metrics
|
||||
ADD COLUMN IF NOT EXISTS redis_conn_total INT,
|
||||
ADD COLUMN IF NOT EXISTS redis_conn_idle INT;
|
||||
|
||||
COMMENT ON COLUMN ops_system_metrics.redis_conn_total IS 'Redis pool total connections (go-redis PoolStats.TotalConns).';
|
||||
COMMENT ON COLUMN ops_system_metrics.redis_conn_idle IS 'Redis pool idle connections (go-redis PoolStats.IdleConns).';
|
||||
9
backend/migrations/034_ops_upstream_error_events.sql
Normal file
9
backend/migrations/034_ops_upstream_error_events.sql
Normal file
@@ -0,0 +1,9 @@
|
||||
-- Add upstream error events list (JSONB) to ops_error_logs for per-request correlation.
|
||||
--
|
||||
-- This is intentionally idempotent.
|
||||
|
||||
ALTER TABLE ops_error_logs
|
||||
ADD COLUMN IF NOT EXISTS upstream_errors JSONB;
|
||||
|
||||
COMMENT ON COLUMN ops_error_logs.upstream_errors IS
|
||||
'Sanitized upstream error events list (JSON array), correlated per gateway request (request_id/client_request_id); used for per-request upstream debugging.';
|
||||
@@ -0,0 +1,77 @@
|
||||
-- Usage dashboard aggregation tables (hourly/daily) + active-user dedup + watermark.
|
||||
-- These tables support Admin Dashboard statistics without full-table scans on usage_logs.
|
||||
|
||||
-- Hourly aggregates (UTC buckets).
|
||||
CREATE TABLE IF NOT EXISTS usage_dashboard_hourly (
|
||||
bucket_start TIMESTAMPTZ PRIMARY KEY,
|
||||
total_requests BIGINT NOT NULL DEFAULT 0,
|
||||
input_tokens BIGINT NOT NULL DEFAULT 0,
|
||||
output_tokens BIGINT NOT NULL DEFAULT 0,
|
||||
cache_creation_tokens BIGINT NOT NULL DEFAULT 0,
|
||||
cache_read_tokens BIGINT NOT NULL DEFAULT 0,
|
||||
total_cost DECIMAL(20, 10) NOT NULL DEFAULT 0,
|
||||
actual_cost DECIMAL(20, 10) NOT NULL DEFAULT 0,
|
||||
total_duration_ms BIGINT NOT NULL DEFAULT 0,
|
||||
active_users BIGINT NOT NULL DEFAULT 0,
|
||||
computed_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_usage_dashboard_hourly_bucket_start
|
||||
ON usage_dashboard_hourly (bucket_start DESC);
|
||||
|
||||
COMMENT ON TABLE usage_dashboard_hourly IS 'Pre-aggregated hourly usage metrics for admin dashboard (UTC buckets).';
|
||||
COMMENT ON COLUMN usage_dashboard_hourly.bucket_start IS 'UTC start timestamp of the hour bucket.';
|
||||
COMMENT ON COLUMN usage_dashboard_hourly.computed_at IS 'When the hourly row was last computed/refreshed.';
|
||||
|
||||
-- Daily aggregates (UTC dates).
|
||||
CREATE TABLE IF NOT EXISTS usage_dashboard_daily (
|
||||
bucket_date DATE PRIMARY KEY,
|
||||
total_requests BIGINT NOT NULL DEFAULT 0,
|
||||
input_tokens BIGINT NOT NULL DEFAULT 0,
|
||||
output_tokens BIGINT NOT NULL DEFAULT 0,
|
||||
cache_creation_tokens BIGINT NOT NULL DEFAULT 0,
|
||||
cache_read_tokens BIGINT NOT NULL DEFAULT 0,
|
||||
total_cost DECIMAL(20, 10) NOT NULL DEFAULT 0,
|
||||
actual_cost DECIMAL(20, 10) NOT NULL DEFAULT 0,
|
||||
total_duration_ms BIGINT NOT NULL DEFAULT 0,
|
||||
active_users BIGINT NOT NULL DEFAULT 0,
|
||||
computed_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_usage_dashboard_daily_bucket_date
|
||||
ON usage_dashboard_daily (bucket_date DESC);
|
||||
|
||||
COMMENT ON TABLE usage_dashboard_daily IS 'Pre-aggregated daily usage metrics for admin dashboard (UTC dates).';
|
||||
COMMENT ON COLUMN usage_dashboard_daily.bucket_date IS 'UTC date of the day bucket.';
|
||||
COMMENT ON COLUMN usage_dashboard_daily.computed_at IS 'When the daily row was last computed/refreshed.';
|
||||
|
||||
-- Hourly active user dedup table.
|
||||
CREATE TABLE IF NOT EXISTS usage_dashboard_hourly_users (
|
||||
bucket_start TIMESTAMPTZ NOT NULL,
|
||||
user_id BIGINT NOT NULL,
|
||||
PRIMARY KEY (bucket_start, user_id)
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_usage_dashboard_hourly_users_bucket_start
|
||||
ON usage_dashboard_hourly_users (bucket_start);
|
||||
|
||||
-- Daily active user dedup table.
|
||||
CREATE TABLE IF NOT EXISTS usage_dashboard_daily_users (
|
||||
bucket_date DATE NOT NULL,
|
||||
user_id BIGINT NOT NULL,
|
||||
PRIMARY KEY (bucket_date, user_id)
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_usage_dashboard_daily_users_bucket_date
|
||||
ON usage_dashboard_daily_users (bucket_date);
|
||||
|
||||
-- Aggregation watermark table (single row).
|
||||
CREATE TABLE IF NOT EXISTS usage_dashboard_aggregation_watermark (
|
||||
id INT PRIMARY KEY,
|
||||
last_aggregated_at TIMESTAMPTZ NOT NULL DEFAULT TIMESTAMPTZ '1970-01-01 00:00:00+00',
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
INSERT INTO usage_dashboard_aggregation_watermark (id)
|
||||
VALUES (1)
|
||||
ON CONFLICT (id) DO NOTHING;
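-- Illustrative only: the aggregation job is expected to read the watermark, aggregate
-- usage_logs rows newer than it, and then advance it, e.g.
--
--   SELECT last_aggregated_at FROM usage_dashboard_aggregation_watermark WHERE id = 1;
--   -- ...upsert the affected hourly/daily buckets...
--   UPDATE usage_dashboard_aggregation_watermark
--   SET last_aggregated_at = NOW(), updated_at = NOW()
--   WHERE id = 1;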
|
||||
54
backend/migrations/035_usage_logs_partitioning.sql
Normal file
54
backend/migrations/035_usage_logs_partitioning.sql
Normal file
@@ -0,0 +1,54 @@
|
||||
-- usage_logs monthly partition bootstrap.
|
||||
-- Only creates partitions when usage_logs is already partitioned.
|
||||
-- Converting usage_logs to a partitioned table requires a manual migration plan.
|
||||
|
||||
DO $$
|
||||
DECLARE
|
||||
is_partitioned BOOLEAN := FALSE;
|
||||
has_data BOOLEAN := FALSE;
|
||||
month_start DATE;
|
||||
prev_month DATE;
|
||||
next_month DATE;
|
||||
BEGIN
|
||||
SELECT EXISTS(
|
||||
SELECT 1
|
||||
FROM pg_partitioned_table pt
|
||||
JOIN pg_class c ON c.oid = pt.partrelid
|
||||
WHERE c.relname = 'usage_logs'
|
||||
) INTO is_partitioned;
|
||||
|
||||
IF NOT is_partitioned THEN
|
||||
SELECT EXISTS(SELECT 1 FROM usage_logs LIMIT 1) INTO has_data;
|
||||
IF NOT has_data THEN
|
||||
-- Automatic conversion is intentionally skipped; see manual migration plan.
|
||||
RAISE NOTICE 'usage_logs is not partitioned; skip automatic partitioning';
|
||||
END IF;
|
||||
END IF;
|
||||
|
||||
IF is_partitioned THEN
|
||||
month_start := date_trunc('month', now() AT TIME ZONE 'UTC')::date;
|
||||
prev_month := (month_start - INTERVAL '1 month')::date;
|
||||
next_month := (month_start + INTERVAL '1 month')::date;
|
||||
|
||||
EXECUTE format(
|
||||
'CREATE TABLE IF NOT EXISTS usage_logs_%s PARTITION OF usage_logs FOR VALUES FROM (%L) TO (%L)',
|
||||
to_char(prev_month, 'YYYYMM'),
|
||||
prev_month,
|
||||
month_start
|
||||
);
|
||||
|
||||
EXECUTE format(
|
||||
'CREATE TABLE IF NOT EXISTS usage_logs_%s PARTITION OF usage_logs FOR VALUES FROM (%L) TO (%L)',
|
||||
to_char(month_start, 'YYYYMM'),
|
||||
month_start,
|
||||
next_month
|
||||
);
|
||||
|
||||
EXECUTE format(
|
||||
'CREATE TABLE IF NOT EXISTS usage_logs_%s PARTITION OF usage_logs FOR VALUES FROM (%L) TO (%L)',
|
||||
to_char(next_month, 'YYYYMM'),
|
||||
next_month,
|
||||
(next_month + INTERVAL '1 month')::date
|
||||
);
|
||||
END IF;
|
||||
END $$;
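-- Illustrative only: when usage_logs is partitioned, the block above creates monthly
-- partitions named usage_logs_YYYYMM for the previous, current, and next month.
-- An operator can extend the range the same way, e.g. (hypothetical month):
--
--   CREATE TABLE IF NOT EXISTS usage_logs_202604 PARTITION OF usage_logs
--     FOR VALUES FROM ('2026-04-01') TO ('2026-05-01');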
|
||||
@@ -0,0 +1,16 @@
|
||||
-- Migration: add the is_count_tokens column to ops_error_logs
-- Purpose: flag errors from count_tokens requests so statistics and alerts can filter them dynamically per configuration
-- Author: System
-- Date: 2026-01-12
|
||||
|
||||
-- Add is_count_tokens column to ops_error_logs table
|
||||
ALTER TABLE ops_error_logs
|
||||
ADD COLUMN IF NOT EXISTS is_count_tokens BOOLEAN NOT NULL DEFAULT FALSE;
|
||||
|
||||
-- Add comment
|
||||
COMMENT ON COLUMN ops_error_logs.is_count_tokens IS 'Whether the error came from a count_tokens request (used for filtering in statistics).';
|
||||
|
||||
-- Create index for filtering (optional, improves query performance)
|
||||
CREATE INDEX IF NOT EXISTS idx_ops_error_logs_is_count_tokens
|
||||
ON ops_error_logs(is_count_tokens)
|
||||
WHERE is_count_tokens = TRUE;
|
||||
10
backend/migrations/036_scheduler_outbox.sql
Normal file
10
backend/migrations/036_scheduler_outbox.sql
Normal file
@@ -0,0 +1,10 @@
|
||||
CREATE TABLE IF NOT EXISTS scheduler_outbox (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
event_type TEXT NOT NULL,
|
||||
account_id BIGINT NULL,
|
||||
group_id BIGINT NULL,
|
||||
payload JSONB NULL,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_scheduler_outbox_created_at ON scheduler_outbox (created_at);
|
||||
14
backend/migrations/037_add_account_rate_multiplier.sql
Normal file
14
backend/migrations/037_add_account_rate_multiplier.sql
Normal file
@@ -0,0 +1,14 @@
|
||||
-- Add account billing rate multiplier and per-usage snapshot.
|
||||
--
|
||||
-- accounts.rate_multiplier: account billing rate multiplier (>= 0; 0 means the account bills at zero).
-- usage_logs.account_rate_multiplier: per-usage-log snapshot of the account multiplier, so that
-- multiplier changes only affect later requests and same-day statistics can weight each segment by its own multiplier.
--
-- Note: usage_logs.account_rate_multiplier is neither backfilled nor declared NOT NULL.
-- For legacy rows where it is NULL, statistics treat the multiplier as 1.0 (via COALESCE).
|
||||
|
||||
ALTER TABLE IF EXISTS accounts
|
||||
ADD COLUMN IF NOT EXISTS rate_multiplier DECIMAL(10,4) NOT NULL DEFAULT 1.0;
|
||||
|
||||
ALTER TABLE IF EXISTS usage_logs
|
||||
ADD COLUMN IF NOT EXISTS account_rate_multiplier DECIMAL(10,4);
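-- Illustrative only: per the note above, consumers should treat a NULL snapshot as 1.0, e.g.
--
--   SELECT COALESCE(account_rate_multiplier, 1.0) AS effective_multiplier
--   FROM usage_logs;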
|
||||
28
backend/migrations/037_ops_alert_silences.sql
Normal file
28
backend/migrations/037_ops_alert_silences.sql
Normal file
@@ -0,0 +1,28 @@
|
||||
-- +goose Up
|
||||
-- +goose StatementBegin
|
||||
-- Ops alert silences: scoped (rule_id + platform + group_id + region)
|
||||
|
||||
CREATE TABLE IF NOT EXISTS ops_alert_silences (
|
||||
id BIGSERIAL PRIMARY KEY,
|
||||
|
||||
rule_id BIGINT NOT NULL,
|
||||
platform VARCHAR(64) NOT NULL,
|
||||
group_id BIGINT,
|
||||
region VARCHAR(64),
|
||||
|
||||
until TIMESTAMPTZ NOT NULL,
|
||||
reason TEXT,
|
||||
|
||||
created_by BIGINT,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_ops_alert_silences_lookup
|
||||
ON ops_alert_silences (rule_id, platform, group_id, region, until);
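-- Illustrative only: the alert evaluator can check for an active scoped silence before
-- firing/notifying, e.g. (parameters are placeholders):
--
--   SELECT 1
--   FROM ops_alert_silences
--   WHERE rule_id = $1
--     AND platform = $2
--     AND group_id IS NOT DISTINCT FROM $3
--     AND until > NOW()
--   LIMIT 1;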
|
||||
|
||||
-- +goose StatementEnd
|
||||
|
||||
-- +goose Down
|
||||
-- +goose StatementBegin
|
||||
DROP TABLE IF EXISTS ops_alert_silences;
|
||||
-- +goose StatementEnd
|
||||
@@ -0,0 +1,111 @@
|
||||
-- Add resolution tracking to ops_error_logs, persist retry results, and standardize error classification enums.
|
||||
--
|
||||
-- This migration is intentionally idempotent.
|
||||
|
||||
SET LOCAL lock_timeout = '5s';
|
||||
SET LOCAL statement_timeout = '10min';
|
||||
|
||||
-- ============================================
|
||||
-- 1) ops_error_logs: resolution fields
|
||||
-- ============================================
|
||||
|
||||
ALTER TABLE ops_error_logs
|
||||
ADD COLUMN IF NOT EXISTS resolved BOOLEAN NOT NULL DEFAULT false;
|
||||
|
||||
ALTER TABLE ops_error_logs
|
||||
ADD COLUMN IF NOT EXISTS resolved_at TIMESTAMPTZ;
|
||||
|
||||
ALTER TABLE ops_error_logs
|
||||
ADD COLUMN IF NOT EXISTS resolved_by_user_id BIGINT;
|
||||
|
||||
ALTER TABLE ops_error_logs
|
||||
ADD COLUMN IF NOT EXISTS resolved_retry_id BIGINT;
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_ops_error_logs_resolved_time
|
||||
ON ops_error_logs (resolved, created_at DESC);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_ops_error_logs_unresolved_time
|
||||
ON ops_error_logs (created_at DESC)
|
||||
WHERE resolved = false;
|
||||
|
||||
-- ============================================
|
||||
-- 2) ops_retry_attempts: persist execution results
|
||||
-- ============================================
|
||||
|
||||
ALTER TABLE ops_retry_attempts
|
||||
ADD COLUMN IF NOT EXISTS success BOOLEAN;
|
||||
|
||||
ALTER TABLE ops_retry_attempts
|
||||
ADD COLUMN IF NOT EXISTS http_status_code INT;
|
||||
|
||||
ALTER TABLE ops_retry_attempts
|
||||
ADD COLUMN IF NOT EXISTS upstream_request_id VARCHAR(128);
|
||||
|
||||
ALTER TABLE ops_retry_attempts
|
||||
ADD COLUMN IF NOT EXISTS used_account_id BIGINT;
|
||||
|
||||
ALTER TABLE ops_retry_attempts
|
||||
ADD COLUMN IF NOT EXISTS response_preview TEXT;
|
||||
|
||||
ALTER TABLE ops_retry_attempts
|
||||
ADD COLUMN IF NOT EXISTS response_truncated BOOLEAN NOT NULL DEFAULT false;
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_ops_retry_attempts_success_time
|
||||
ON ops_retry_attempts (success, created_at DESC);
|
||||
|
||||
-- Backfill best-effort fields for existing rows.
|
||||
UPDATE ops_retry_attempts
|
||||
SET success = (LOWER(COALESCE(status, '')) = 'succeeded')
|
||||
WHERE success IS NULL;
|
||||
|
||||
UPDATE ops_retry_attempts
|
||||
SET upstream_request_id = result_request_id
|
||||
WHERE upstream_request_id IS NULL AND result_request_id IS NOT NULL;
|
||||
|
||||
-- ============================================
|
||||
-- 3) Standardize classification enums in ops_error_logs
|
||||
--
|
||||
-- New enums:
|
||||
-- error_phase: request|auth|routing|upstream|network|internal
|
||||
-- error_owner: client|provider|platform
|
||||
-- error_source: client_request|upstream_http|gateway
|
||||
-- ============================================
|
||||
|
||||
-- Owner: legacy sub2api => platform.
|
||||
UPDATE ops_error_logs
|
||||
SET error_owner = 'platform'
|
||||
WHERE LOWER(COALESCE(error_owner, '')) = 'sub2api';
|
||||
|
||||
-- Owner: normalize empty/null to platform (best-effort).
|
||||
UPDATE ops_error_logs
|
||||
SET error_owner = 'platform'
|
||||
WHERE COALESCE(TRIM(error_owner), '') = '';
|
||||
|
||||
-- Phase: map legacy phases.
|
||||
UPDATE ops_error_logs
|
||||
SET error_phase = CASE
|
||||
WHEN COALESCE(TRIM(error_phase), '') = '' THEN 'internal'
|
||||
WHEN LOWER(error_phase) IN ('billing', 'concurrency', 'response') THEN 'request'
|
||||
WHEN LOWER(error_phase) IN ('scheduling') THEN 'routing'
|
||||
WHEN LOWER(error_phase) IN ('request', 'auth', 'routing', 'upstream', 'network', 'internal') THEN LOWER(error_phase)
|
||||
ELSE 'internal'
|
||||
END;
|
||||
|
||||
-- Source: map legacy sources.
|
||||
UPDATE ops_error_logs
|
||||
SET error_source = CASE
|
||||
WHEN COALESCE(TRIM(error_source), '') = '' THEN 'gateway'
|
||||
WHEN LOWER(error_source) IN ('billing', 'concurrency') THEN 'client_request'
|
||||
WHEN LOWER(error_source) IN ('upstream_http') THEN 'upstream_http'
|
||||
WHEN LOWER(error_source) IN ('upstream_network') THEN 'gateway'
|
||||
WHEN LOWER(error_source) IN ('internal') THEN 'gateway'
|
||||
WHEN LOWER(error_source) IN ('client_request', 'upstream_http', 'gateway') THEN LOWER(error_source)
|
||||
ELSE 'gateway'
|
||||
END;
|
||||
|
||||
-- Auto-resolve recovered upstream errors (client status < 400).
|
||||
UPDATE ops_error_logs
|
||||
SET
|
||||
resolved = true,
|
||||
resolved_at = COALESCE(resolved_at, created_at)
|
||||
WHERE resolved = false AND COALESCE(status_code, 0) > 0 AND COALESCE(status_code, 0) < 400;
|
||||
178
backend/migrations/README.md
Normal file
178
backend/migrations/README.md
Normal file
@@ -0,0 +1,178 @@
|
||||
# Database Migrations
|
||||
|
||||
## Overview
|
||||
|
||||
This directory contains SQL migration files for database schema changes. The migration system uses SHA256 checksums to ensure migration immutability and consistency across environments.
|
||||
|
||||
## Migration File Naming
|
||||
|
||||
Format: `NNN_description.sql`
|
||||
- `NNN`: Sequential number (e.g., 001, 002, 003)
|
||||
- `description`: Brief description in snake_case
|
||||
|
||||
Example: `017_add_gemini_tier_id.sql`
|
||||
|
||||
## Migration File Structure
|
||||
|
||||
```sql
|
||||
-- +goose Up
|
||||
-- +goose StatementBegin
|
||||
-- Your forward migration SQL here
|
||||
-- +goose StatementEnd
|
||||
|
||||
-- +goose Down
|
||||
-- +goose StatementBegin
|
||||
-- Your rollback migration SQL here
|
||||
-- +goose StatementEnd
|
||||
```
|
||||
|
||||
## Important Rules
|
||||
|
||||
### ⚠️ Immutability Principle
|
||||
|
||||
**Once a migration is applied to ANY environment (dev, staging, production), it MUST NOT be modified.**
|
||||
|
||||
Why?
|
||||
- Each migration has a SHA256 checksum stored in the `schema_migrations` table
|
||||
- Modifying an applied migration causes checksum mismatch errors
|
||||
- Different environments would have inconsistent database states
|
||||
- Breaks audit trail and reproducibility
|
||||
|
||||
### ✅ Correct Workflow
|
||||
|
||||
1. **Create new migration**
|
||||
```bash
|
||||
# Create new file with next sequential number
|
||||
touch migrations/018_your_change.sql
|
||||
```
|
||||
|
||||
2. **Write Up and Down migrations**
|
||||
- Up: Apply the change
|
||||
- Down: Revert the change (should be symmetric with Up)
|
||||
|
||||
3. **Test locally**
|
||||
```bash
|
||||
# Apply migration
|
||||
make migrate-up
|
||||
|
||||
# Test rollback
|
||||
make migrate-down
|
||||
```
|
||||
|
||||
4. **Commit and deploy**
|
||||
```bash
|
||||
git add migrations/018_your_change.sql
|
||||
git commit -m "feat(db): add your change"
|
||||
```
|
||||
|
||||
### ❌ What NOT to Do
|
||||
|
||||
- ❌ Modify an already-applied migration file
|
||||
- ❌ Delete migration files
|
||||
- ❌ Change migration file names
|
||||
- ❌ Reorder migration numbers
|
||||
|
||||
### 🔧 If You Accidentally Modified an Applied Migration
|
||||
|
||||
**Error message:**
|
||||
```
|
||||
migration 017_add_gemini_tier_id.sql checksum mismatch (db=abc123... file=def456...)
|
||||
```
|
||||
|
||||
**Solution:**
|
||||
```bash
|
||||
# 1. Find the original version
|
||||
git log --oneline -- migrations/017_add_gemini_tier_id.sql
|
||||
|
||||
# 2. Revert to the commit when it was first applied
|
||||
git checkout <commit-hash> -- migrations/017_add_gemini_tier_id.sql
|
||||
|
||||
# 3. Create a NEW migration for your changes
|
||||
touch migrations/018_your_new_change.sql
|
||||
```
|
||||
|
||||
## Migration System Details
|
||||
|
||||
- **Checksum Algorithm**: SHA256 of trimmed file content
|
||||
- **Tracking Table**: `schema_migrations` (filename, checksum, applied_at); see the example query below
|
||||
- **Runner**: `internal/repository/migrations_runner.go`
|
||||
- **Auto-run**: Migrations run automatically on service startup
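
If you need to verify what the runner recorded for a given migration, a quick check against the tracking table looks like this (using the columns listed above; the filename is just an example):

```sql
-- Compare the stored checksum and applied_at for one migration against your local file
SELECT filename, checksum, applied_at
FROM schema_migrations
WHERE filename = '017_add_gemini_tier_id.sql';
```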
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Keep migrations small and focused**
|
||||
- One logical change per migration
|
||||
- Easier to review and rollback
|
||||
|
||||
2. **Write reversible migrations**
|
||||
- Always provide a working Down migration
|
||||
- Test rollback before committing
|
||||
|
||||
3. **Use transactions**
|
||||
- Wrap DDL statements in transactions when possible
|
||||
- Ensures atomicity
|
||||
|
||||
4. **Add comments**
|
||||
- Explain WHY the change is needed
|
||||
- Document any special considerations
|
||||
|
||||
5. **Test in development first**
|
||||
- Apply migration locally
|
||||
- Verify data integrity
|
||||
- Test rollback
|
||||
|
||||
## Example Migration
|
||||
|
||||
```sql
|
||||
-- +goose Up
|
||||
-- +goose StatementBegin
|
||||
-- Add tier_id field to Gemini OAuth accounts for quota tracking
|
||||
UPDATE accounts
|
||||
SET credentials = jsonb_set(
|
||||
credentials,
|
||||
'{tier_id}',
|
||||
'"LEGACY"',
|
||||
true
|
||||
)
|
||||
WHERE platform = 'gemini'
|
||||
AND type = 'oauth'
|
||||
AND credentials->>'tier_id' IS NULL;
|
||||
-- +goose StatementEnd
|
||||
|
||||
-- +goose Down
|
||||
-- +goose StatementBegin
|
||||
-- Remove tier_id field
|
||||
UPDATE accounts
|
||||
SET credentials = credentials - 'tier_id'
|
||||
WHERE platform = 'gemini'
|
||||
AND type = 'oauth'
|
||||
AND credentials->>'tier_id' = 'LEGACY';
|
||||
-- +goose StatementEnd
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Checksum Mismatch
|
||||
See "If You Accidentally Modified an Applied Migration" above.
|
||||
|
||||
### Migration Failed
|
||||
```bash
|
||||
# Check migration status
|
||||
psql -d sub2api -c "SELECT * FROM schema_migrations ORDER BY applied_at DESC;"
|
||||
|
||||
# Manually rollback if needed (use with caution)
|
||||
# Better to fix the migration and create a new one
|
||||
```
|
||||
|
||||
### Need to Skip a Migration (Emergency Only)
|
||||
```sql
|
||||
-- DANGEROUS: Only use in development or with extreme caution
|
||||
INSERT INTO schema_migrations (filename, checksum, applied_at)
|
||||
VALUES ('NNN_migration.sql', 'calculated_checksum', NOW());
|
||||
```
|
||||
|
||||
## References
|
||||
|
||||
- Migration runner: `internal/repository/migrations_runner.go`
|
||||
- Goose syntax: https://github.com/pressly/goose
|
||||
- PostgreSQL docs: https://www.postgresql.org/docs/
|
||||
34
backend/migrations/migrations.go
Normal file
34
backend/migrations/migrations.go
Normal file
@@ -0,0 +1,34 @@
|
||||
// Package migrations contains the embedded SQL database migration files.
//
// It uses the Go 1.16+ embed feature to bundle the SQL files into the compiled binary.
// Benefits of this approach:
// - no separate migration files need to be shipped at deploy time
// - the migrations always match the code version
// - easy to version-control and code-review
|
||||
package migrations
|
||||
|
||||
import "embed"
|
||||
|
||||
// FS contains all embedded SQL migration files in this directory.
//
// Migration naming convention:
// - use a zero-padded numeric prefix to guarantee execution order
// - format: NNN_description.sql (e.g. 001_init.sql, 002_add_users.sql)
// - the description part uses lowercase words separated by underscores
//
// Migration file requirements:
// - must be idempotent (safe to re-run without errors)
// - prefer IF NOT EXISTS / IF EXISTS syntax
// - once applied, an existing migration file must not be modified (verified via checksum)
//
// Example migration file:
|
||||
//
|
||||
// -- 001_init.sql
|
||||
// CREATE TABLE IF NOT EXISTS users (
|
||||
// id BIGSERIAL PRIMARY KEY,
|
||||
// email VARCHAR(255) NOT NULL UNIQUE,
|
||||
// created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
// );
|
||||
//
|
||||
//go:embed *.sql
|
||||
var FS embed.FS
|
||||