feat(sync): full code sync from release

This commit is contained in:
yangjianbo
2026-02-28 15:01:20 +08:00
parent bfc7b339f7
commit bb664d9bbf
338 changed files with 54513 additions and 2011 deletions

View File

@@ -66,11 +66,15 @@ LOG_SAMPLING_INITIAL=100
# 之后每 N 条保留 1 条
LOG_SAMPLING_THEREAFTER=100
# Global max request body size in bytes (default: 100MB)
# 全局最大请求体大小(字节,默认 100MB
# Global max request body size in bytes (default: 256MB)
# 全局最大请求体大小(字节,默认 256MB)
# Applies to all requests, especially important for h2c first request memory protection
# 适用于所有请求,对 h2c 第一请求的内存保护尤为重要
SERVER_MAX_REQUEST_BODY_SIZE=104857600
SERVER_MAX_REQUEST_BODY_SIZE=268435456
# Gateway max request body size in bytes (default: 256MB)
# 网关请求体最大字节数(默认 256MB)
GATEWAY_MAX_BODY_SIZE=268435456
# Enable HTTP/2 Cleartext (h2c) for client connections
# 启用 HTTP/2 Cleartext (h2c) 客户端连接

View File

@@ -0,0 +1,78 @@
# datamanagementd 部署说明(数据管理)
本文说明如何在宿主机部署 `datamanagementd`,并与主进程联动开启“数据管理”功能。
## 1. 关键约束
- 主进程固定探测路径:`/tmp/sub2api-datamanagement.sock`
- 仅当该 Unix Socket 可连通且 `Health` 成功时,后台“数据管理”才会启用
- `datamanagementd` 使用 SQLite 持久化元数据,不依赖主库
## 2. 宿主机构建与运行
```bash
cd /opt/sub2api-src/datamanagement
go build -o /opt/sub2api/datamanagementd ./cmd/datamanagementd
mkdir -p /var/lib/sub2api/datamanagement
chown -R sub2api:sub2api /var/lib/sub2api/datamanagement
```
手动启动示例:
```bash
/opt/sub2api/datamanagementd \
-socket-path /tmp/sub2api-datamanagement.sock \
-sqlite-path /var/lib/sub2api/datamanagement/datamanagementd.db \
-version 1.0.0
```
## 3. systemd 托管(推荐)
仓库已提供示例服务文件:`deploy/sub2api-datamanagementd.service`
```bash
sudo cp deploy/sub2api-datamanagementd.service /etc/systemd/system/
sudo systemctl daemon-reload
sudo systemctl enable --now sub2api-datamanagementd
sudo systemctl status sub2api-datamanagementd
```
查看日志:
```bash
sudo journalctl -u sub2api-datamanagementd -f
```
也可以使用一键安装脚本(自动安装二进制 + 注册 systemd):
```bash
# 方式一:使用现成二进制
sudo ./deploy/install-datamanagementd.sh --binary /path/to/datamanagementd
# 方式二:从源码构建后安装
sudo ./deploy/install-datamanagementd.sh --source /path/to/sub2api
```
## 4. Docker 部署联动
`sub2api` 运行在 Docker 容器中,需要将宿主机 Socket 挂载到容器同路径:
```yaml
services:
sub2api:
volumes:
- /tmp/sub2api-datamanagement.sock:/tmp/sub2api-datamanagement.sock
```
建议在 `docker-compose.override.yml` 中维护该挂载,避免覆盖主 compose 文件。
## 5. 依赖检查
`datamanagementd` 执行备份时依赖以下工具:
- `pg_dump`
- `redis-cli`
- `docker`(仅 `source_mode=docker_exec` 时)
缺失依赖会导致对应任务失败,并在任务详情中体现错误信息。

View File

@@ -19,7 +19,10 @@ This directory contains files for deploying Sub2API on Linux servers.
| `.env.example` | Docker environment variables template |
| `DOCKER.md` | Docker Hub documentation |
| `install.sh` | One-click binary installation script |
| `install-datamanagementd.sh` | datamanagementd 一键安装脚本 |
| `sub2api.service` | Systemd service unit file |
| `sub2api-datamanagementd.service` | datamanagementd systemd service unit file |
| `DATAMANAGEMENTD_CN.md` | datamanagementd 部署与联动说明(中文) |
| `config.example.yaml` | Example configuration file |
---
@@ -145,6 +148,14 @@ SELECT
(SELECT COUNT(*) FROM user_allowed_groups) AS new_pair_count;
```
### datamanagementd(数据管理)联动
如需启用管理后台“数据管理”功能,请额外部署宿主机 `datamanagementd`
- 主进程固定探测 `/tmp/sub2api-datamanagement.sock`
- Docker 场景下需把宿主机 Socket 挂载到容器内同路径
- 详细步骤见:`deploy/DATAMANAGEMENTD_CN.md`
### Commands
For **local directory version** (docker-compose.local.yml):
@@ -575,7 +586,7 @@ gateway:
name: "Profile 2"
cipher_suites: [4866, 4867, 4865, 49199, 49195, 49200, 49196]
curves: [29, 23, 24]
point_formats: [0]
point_formats: 0
# Another custom profile
profile_3:

View File

@@ -27,11 +27,11 @@ server:
# Trusted proxies for X-Forwarded-For parsing (CIDR/IP). Empty disables trusted proxies.
# 信任的代理地址CIDR/IP 格式),用于解析 X-Forwarded-For 头。留空则禁用代理信任。
trusted_proxies: []
# Global max request body size in bytes (default: 100MB)
# 全局最大请求体大小(字节,默认 100MB
# Global max request body size in bytes (default: 256MB)
# 全局最大请求体大小(字节,默认 256MB)
# Applies to all requests, especially important for h2c first request memory protection
# 适用于所有请求,对 h2c 第一请求的内存保护尤为重要
max_request_body_size: 104857600
max_request_body_size: 268435456
# HTTP/2 Cleartext (h2c) configuration
# HTTP/2 Cleartext (h2c) 配置
h2c:
@@ -143,9 +143,9 @@ gateway:
# Timeout for waiting upstream response headers (seconds)
# 等待上游响应头超时时间(秒)
response_header_timeout: 600
# Max request body size in bytes (default: 100MB)
# 请求体最大字节数(默认 100MB
max_body_size: 104857600
# Max request body size in bytes (default: 256MB)
# 请求体最大字节数(默认 256MB)
max_body_size: 268435456
# Max bytes to read for non-stream upstream responses (default: 8MB)
# 非流式上游响应体读取上限(默认 8MB)
upstream_response_read_max_bytes: 8388608
@@ -199,6 +199,83 @@ gateway:
# OpenAI 透传模式是否放行客户端超时头(如 x-stainless-timeout)
# 默认 false,过滤超时头,降低上游提前断流风险。
openai_passthrough_allow_timeout_headers: false
# OpenAI Responses WebSocket 配置(默认开启,可按需回滚到 HTTP)
openai_ws:
# 新版 WS mode 路由(默认关闭)。关闭时保持当前 legacy 实现行为。
mode_router_v2_enabled: false
# ingress 默认模式(off|shared|dedicated,仅 mode_router_v2_enabled=true 生效)
ingress_mode_default: shared
# 全局总开关,默认 true;关闭时所有请求保持原有 HTTP/SSE 路由
enabled: true
# 按账号类型细分开关
oauth_enabled: true
apikey_enabled: true
# 全局强制 HTTP紧急回滚开关
force_http: false
# 允许在 WSv2 下按策略恢复 store=true默认 false
allow_store_recovery: false
# ingress 模式收到 previous_response_not_found 时,自动去掉 previous_response_id 重试一次(默认 true)
ingress_previous_response_recovery_enabled: true
# store=false 且无可复用会话连接时的策略:
# strict=强制新建连接(隔离优先);adaptive=仅在高风险失败后强制新建;off=尽量复用(性能优先)
store_disabled_conn_mode: strict
# store=false 且无可复用会话连接时,是否强制新建连接(默认 true,优先会话隔离)
# 兼容旧配置:仅在 store_disabled_conn_mode 未配置时生效
store_disabled_force_new_conn: true
# 是否启用 WSv2 generate=false 预热(默认 false)
prewarm_generate_enabled: false
# 协议 feature 开关v2 优先于 v1
responses_websockets: false
responses_websockets_v2: true
# 连接池参数(按账号池化复用)
max_conns_per_account: 128
min_idle_per_account: 4
max_idle_per_account: 12
# 是否按账号并发动态计算连接池上限:
# effective_max_conns = min(max_conns_per_account, ceil(account.concurrency * factor))
dynamic_max_conns_by_account_concurrency_enabled: true
# 按账号类型分别设置系数(OAuth / API Key)
oauth_max_conns_factor: 1.0
apikey_max_conns_factor: 1.0
dial_timeout_seconds: 10
read_timeout_seconds: 900
write_timeout_seconds: 120
pool_target_utilization: 0.7
queue_limit_per_conn: 64
# 流式写出批量 flush 参数
event_flush_batch_size: 1
event_flush_interval_ms: 10
# 预热触发冷却(毫秒)
prewarm_cooldown_ms: 300
# WS 回退到 HTTP 后的冷却时间(秒),用于避免 WS/HTTP 来回抖动(0 表示关闭冷却)
fallback_cooldown_seconds: 30
# WS 重试退避参数(毫秒)
retry_backoff_initial_ms: 120
retry_backoff_max_ms: 2000
# 抖动比例(0-1)
retry_jitter_ratio: 0.2
# 单次请求 WS 重试总预算(毫秒);建议设置为有限值,避免重试拉高 TTFT 长尾
retry_total_budget_ms: 5000
# payload_schema 日志采样率(0-1),降低热路径日志放大
payload_log_sample_rate: 0.2
# 调度与粘连参数
lb_top_k: 7
sticky_session_ttl_seconds: 3600
# 会话哈希迁移兼容开关:新 key 未命中时回退读取旧 SHA-256 key
session_hash_read_old_fallback: true
# 会话哈希迁移兼容开关:写入时双写旧 SHA-256 key(短 TTL)
session_hash_dual_write_old: true
# context 元数据迁移兼容开关:保留旧 ctxkey.* 读取/注入桥接
metadata_bridge_enabled: true
sticky_response_id_ttl_seconds: 3600
# 兼容旧键:当 sticky_response_id_ttl_seconds 缺失时回退该值
sticky_previous_response_ttl_seconds: 3600
scheduler_score_weights:
priority: 1.0
load: 1.0
queue: 0.7
error_rate: 0.8
ttft: 0.5
# HTTP upstream connection pool settings (HTTP/2 + multi-proxy scenario defaults)
# HTTP 上游连接池配置HTTP/2 + 多代理场景默认值)
# Max idle connections across all hosts
@@ -779,12 +856,12 @@ rate_limit:
# 定价数据源(可选)
# =============================================================================
pricing:
# URL to fetch model pricing data (default: LiteLLM)
# 获取模型定价数据的 URL默认LiteLLM
remote_url: "https://github.com/Wei-Shaw/model-price-repo/raw/refs/heads/main/model_prices_and_context_window.json"
# URL to fetch model pricing data (default: pinned model-price-repo commit)
# 获取模型定价数据的 URL(默认:固定 commit 的 model-price-repo)
remote_url: "https://raw.githubusercontent.com/Wei-Shaw/model-price-repo/c7947e9871687e664180bc971d4837f1fc2784a9/model_prices_and_context_window.json"
# Hash verification URL (optional)
# 哈希校验 URL(可选)
hash_url: "https://github.com/Wei-Shaw/model-price-repo/raw/refs/heads/main/model_prices_and_context_window.sha256"
hash_url: "https://raw.githubusercontent.com/Wei-Shaw/model-price-repo/c7947e9871687e664180bc971d4837f1fc2784a9/model_prices_and_context_window.sha256"
# Local data directory for caching
# 本地数据缓存目录
data_dir: "./data"

View File

@@ -127,6 +127,19 @@ services:
# - ./logs:/app/logs
# - ./backups:/app/backups
# =============================================================================
# Scenario 6: 启用宿主机 datamanagementd(数据管理)
# =============================================================================
# 说明:
# - datamanagementd 运行在宿主机systemd 或手动)
# - 主进程固定探测 /tmp/sub2api-datamanagement.sock
# - 需要把宿主机 socket 挂载到容器内同路径
#
# services:
# sub2api:
# volumes:
# - /tmp/sub2api-datamanagement.sock:/tmp/sub2api-datamanagement.sock
# =============================================================================
# Additional Notes
# =============================================================================

123
deploy/install-datamanagementd.sh Executable file
View File

@@ -0,0 +1,123 @@
#!/usr/bin/env bash
# One-click installer for the datamanagementd daemon (binary + systemd unit).
#
# Usage:
#   sudo ./install-datamanagementd.sh --binary /path/to/datamanagementd
# or:
#   sudo ./install-datamanagementd.sh --source /path/to/sub2api/repo
set -euo pipefail

# Mutually exclusive inputs, filled in by the argument-parsing loop below.
BIN_PATH=""
SOURCE_PATH=""

# Installation constants; never reassigned later, so mark them readonly.
readonly INSTALL_DIR="/opt/sub2api"
readonly DATA_DIR="/var/lib/sub2api/datamanagement"
readonly SERVICE_FILE_NAME="sub2api-datamanagementd.service"
#######################################
# Print usage information (Chinese, matching the script's user-facing text).
# Outputs: help text to stdout.
#######################################
# NOTE: dropped the non-idiomatic `function name()` hybrid form.
print_help() {
  cat <<'EOF'
用法:
  install-datamanagementd.sh [--binary <datamanagementd二进制路径>] [--source <仓库路径>]
参数:
  --binary 指定已构建的 datamanagementd 二进制路径
  --source 指定 sub2api 仓库路径(脚本会执行 go build)
  -h, --help 显示帮助
示例:
  sudo ./install-datamanagementd.sh --binary ./datamanagement/datamanagementd
  sudo ./install-datamanagementd.sh --source /opt/sub2api-src
EOF
}
# Parse command-line flags; assigns the script-global BIN_PATH / SOURCE_PATH.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --binary)
      BIN_PATH="${2:-}"
      shift 2
      ;;
    --source)
      SOURCE_PATH="${2:-}"
      shift 2
      ;;
    -h|--help)
      print_help
      exit 0
      ;;
    *)
      # Diagnostics belong on stderr, not stdout.
      echo "未知参数: $1" >&2
      print_help >&2
      exit 1
      ;;
  esac
done
# Validate arguments and privileges, then resolve BIN_PATH (building from
# source first when --source was given). All error messages go to stderr.
if [[ -n "$BIN_PATH" && -n "$SOURCE_PATH" ]]; then
  echo "错误: --binary 与 --source 只能二选一" >&2
  exit 1
fi
if [[ -z "$BIN_PATH" && -z "$SOURCE_PATH" ]]; then
  echo "错误: 必须提供 --binary 或 --source" >&2
  exit 1
fi
# Installing binaries, creating users and touching systemd all require root.
if [[ "$(id -u)" -ne 0 ]]; then
  echo "错误: 请使用 root 权限执行(例如 sudo)" >&2
  exit 1
fi
if [[ -n "$SOURCE_PATH" ]]; then
  if [[ ! -d "$SOURCE_PATH/datamanagement" ]]; then
    echo "错误: 无效仓库路径,未找到 $SOURCE_PATH/datamanagement" >&2
    exit 1
  fi
  # Fail early with a clear message when the Go toolchain is missing.
  if ! command -v go >/dev/null 2>&1; then
    echo "错误: 未找到 go 命令,--source 模式需要 Go 工具链" >&2
    exit 1
  fi
  echo "[1/6] 从源码构建 datamanagementd..."
  # Build in a subshell so the script's working directory is unchanged.
  (cd "$SOURCE_PATH/datamanagement" && go build -o datamanagementd ./cmd/datamanagementd)
  BIN_PATH="$SOURCE_PATH/datamanagement/datamanagementd"
fi
if [[ ! -f "$BIN_PATH" ]]; then
  echo "错误: 二进制文件不存在: $BIN_PATH" >&2
  exit 1
fi
# Create the dedicated system user on first install only.
if ! id sub2api >/dev/null 2>&1; then
  echo "[2/6] 创建系统用户 sub2api..."
  useradd --system --no-create-home --shell /usr/sbin/nologin sub2api
else
  echo "[2/6] 系统用户 sub2api 已存在,跳过创建"
fi

echo "[3/6] 安装 datamanagementd 二进制..."
mkdir -p "$INSTALL_DIR"
install -m 0755 "$BIN_PATH" "$INSTALL_DIR/datamanagementd"

echo "[4/6] 准备数据目录..."
mkdir -p "$DATA_DIR"
# Own the whole /var/lib/sub2api tree so the daemon can create sibling files.
chown -R sub2api:sub2api /var/lib/sub2api
chmod 0750 "$DATA_DIR"

# The systemd unit template ships alongside this script.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
SERVICE_TEMPLATE="$SCRIPT_DIR/$SERVICE_FILE_NAME"
if [[ ! -f "$SERVICE_TEMPLATE" ]]; then
  echo "错误: 未找到服务模板 $SERVICE_TEMPLATE" >&2
  exit 1
fi

echo "[5/6] 安装 systemd 服务..."
# Unit files must not be executable; install with explicit 0644 instead of cp.
install -m 0644 "$SERVICE_TEMPLATE" "/etc/systemd/system/$SERVICE_FILE_NAME"
systemctl daemon-reload
# Reuse $SERVICE_FILE_NAME instead of duplicating the hard-coded unit name
# (systemctl accepts the full unit file name including the .service suffix).
systemctl enable --now "$SERVICE_FILE_NAME"

echo "[6/6] 完成,当前状态:"
# Status output is informational; never fail the installer on it.
systemctl --no-pager --full status "$SERVICE_FILE_NAME" || true

cat <<'EOF'
下一步建议:
1. 查看日志:sudo journalctl -u sub2api-datamanagementd -f
2. 在 sub2api(容器部署时)挂载 socket:
   /tmp/sub2api-datamanagement.sock:/tmp/sub2api-datamanagement.sock
3. 进入管理后台“数据管理”页面确认 agent=enabled
EOF

View File

@@ -0,0 +1,22 @@
[Unit]
Description=Sub2API Data Management Daemon
After=network.target
Wants=network.target
[Service]
Type=simple
User=sub2api
Group=sub2api
WorkingDirectory=/opt/sub2api
ExecStart=/opt/sub2api/datamanagementd \
-socket-path /tmp/sub2api-datamanagement.sock \
-sqlite-path /var/lib/sub2api/datamanagement/datamanagementd.db \
-version 1.0.0
Restart=always
RestartSec=5s
LimitNOFILE=100000
NoNewPrivileges=true
PrivateTmp=false
[Install]
WantedBy=multi-user.target