Merge pull request #1529 from IanShaw027/feat/group-messages-dispatch-redo

feat: add messages dispatch model mapping for openai groups and support instructions template injection
This commit is contained in:
Wesley Liddick
2026-04-09 21:14:38 +08:00
committed by GitHub
34 changed files with 2954 additions and 920 deletions

View File

@@ -0,0 +1,5 @@
You are Codex, based on GPT-5. You are running as a coding agent in the Codex CLI on a user's computer.
{{ if .ExistingInstructions }}
{{ .ExistingInstructions }}
{{ end }}
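
The file above is a Go text/template: when ExistingInstructions is empty, the {{ if }} block renders nothing and only the fixed preamble is emitted. Below is a minimal, purely illustrative sketch of rendering such a template with Go's standard library; the instructionsData struct, its field values, and the program wiring are assumptions for demonstration only, though the field names follow the template variables documented in the config example later in this diff.

// Illustrative sketch only: not the project's actual code.
package main

import (
    "fmt"
    "os"
    "strings"
    "text/template"
)

// Hypothetical data passed to the instructions template; the field names
// mirror the documented template variables.
type instructionsData struct {
    ExistingInstructions string
    OriginalModel        string
    NormalizedModel      string
    BillingModel         string
    UpstreamModel        string
}

func main() {
    const tmplText = "You are Codex, based on GPT-5. You are running as a coding agent in the Codex CLI on a user's computer.\n" +
        "{{ if .ExistingInstructions }}\n{{ .ExistingInstructions }}\n{{ end }}\n"

    tmpl := template.Must(template.New("instructions").Parse(tmplText))

    // With client-provided instructions, the conditional block is rendered.
    var out strings.Builder
    if err := tmpl.Execute(&out, instructionsData{ExistingInstructions: "Prefer small, reviewable diffs."}); err != nil {
        panic(err)
    }
    fmt.Println(out.String())

    // Without them, the conditional is skipped and only the preamble remains.
    if err := tmpl.Execute(os.Stdout, instructionsData{}); err != nil {
        panic(err)
    }
}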

View File

@@ -202,6 +202,32 @@ gateway:
#
# Note: enabling this affects the behavior of all clients (not just VS Code / Codex CLI); enable with caution.
force_codex_cli: false
# Optional: path to a template file used to build the final top-level Codex `instructions`.
#
# This is applied on the `/v1/messages -> Responses/Codex` conversion path,
# after Claude `system` has already been normalized into Codex `instructions`.
#
# The template can reference:
# - {{ .ExistingInstructions }} : converted client instructions/system
# - {{ .OriginalModel }} : original requested model
# - {{ .NormalizedModel }} : normalized routing model
# - {{ .BillingModel }} : billing model
# - {{ .UpstreamModel }} : final upstream model
#
# If you want to preserve client system prompts, keep {{ .ExistingInstructions }}
# somewhere in the template. If omitted, the template output fully replaces them.
#
# Docker users can mount a host file to /app/data/codex-instructions.md.tmpl
# and point this field there.
forced_codex_instructions_template_file: ""
# Whether OpenAI passthrough mode forwards client timeout headers (e.g. x-stainless-timeout).
# Defaults to false: timeout headers are filtered to reduce the risk of the upstream ending the stream early.
openai_passthrough_allow_timeout_headers: false
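
As a concrete illustration of the notes above, a custom template that injects routing metadata while still preserving the client's system prompt through {{ .ExistingInstructions }} could look like the sketch below; the wording is an assumption for demonstration, not a shipped default:

You are Codex, based on GPT-5. You are running as a coding agent in the Codex CLI on a user's computer.
Requested model: {{ .OriginalModel }} (normalized: {{ .NormalizedModel }}, billed as {{ .BillingModel }}, upstream: {{ .UpstreamModel }})
{{ if .ExistingInstructions }}
{{ .ExistingInstructions }}
{{ end }}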
@@ -347,12 +373,6 @@ gateway:
# Enable batch load calculation for scheduling
load_batch_enabled: true
# MGET chunk size when reading scheduling snapshot buckets
snapshot_mget_chunk_size: 128
# Write chunk size when rebuilding scheduling snapshots
snapshot_write_chunk_size: 256
# Concurrency slot cleanup interval (duration)
slot_cleanup_interval: 30s

View File

@@ -31,6 +31,10 @@ services:
# Optional: Mount custom config.yaml (uncomment and create the file first)
# Copy config.example.yaml to config.yaml, modify it, then uncomment:
# - ./config.yaml:/app/data/config.yaml
# Optional: Mount a custom Codex instructions template file, then point
# gateway.forced_codex_instructions_template_file at /app/data/codex-instructions.md.tmpl
# in config.yaml.
# - ./codex-instructions.md.tmpl:/app/data/codex-instructions.md.tmpl:ro
environment:
# =======================================================================
# Auto Setup (REQUIRED for Docker deployment)
@@ -146,7 +150,17 @@ services:
networks:
- sub2api-network
healthcheck:
test: ["CMD", "wget", "-q", "-T", "5", "-O", "/dev/null", "http://localhost:8080/health"]
test:
[
"CMD",
"wget",
"-q",
"-T",
"5",
"-O",
"/dev/null",
"http://localhost:8080/health",
]
interval: 30s
timeout: 10s
retries: 3
@@ -177,11 +191,17 @@ services:
networks:
- sub2api-network
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-sub2api} -d ${POSTGRES_DB:-sub2api}"]
test:
[
"CMD-SHELL",
"pg_isready -U ${POSTGRES_USER:-sub2api} -d ${POSTGRES_DB:-sub2api}",
]
interval: 10s
timeout: 5s
retries: 5
start_period: 10s
ports:
- 5432:5432
# Note: the port is not exposed to the host; the app connects over the internal network.
# For debugging, you can temporarily add: ports: ["127.0.0.1:5433:5432"]
@@ -199,12 +219,12 @@ services:
volumes:
- redis_data:/data
command: >
sh -c '
redis-server
--save 60 1
--appendonly yes
--appendfsync everysec
${REDIS_PASSWORD:+--requirepass "$REDIS_PASSWORD"}'
sh -c '
redis-server
--save 60 1
--appendonly yes
--appendfsync everysec
${REDIS_PASSWORD:+--requirepass "$REDIS_PASSWORD"}'
environment:
- TZ=${TZ:-Asia/Shanghai}
# REDISCLI_AUTH is used by redis-cli for authentication (safer than -a flag)
@@ -217,7 +237,8 @@ services:
timeout: 5s
retries: 5
start_period: 5s
ports:
- 6379:6379
# =============================================================================
# Volumes
# =============================================================================