From 6c469b42ed5bf928c680ca7b4c7edc01db34677e Mon Sep 17 00:00:00 2001
From: shaw
Date: Mon, 22 Dec 2025 22:58:31 +0800
Subject: [PATCH] feat: add support for Codex forwarding
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 backend/cmd/server/wire.go | 8 +
 backend/cmd/server/wire_gen.go | 81 +-
 .../internal/handler/admin/account_handler.go | 102 ++-
 .../handler/admin/openai_oauth_handler.go | 228 ++++++
 backend/internal/handler/gateway_handler.go | 158 +---
 backend/internal/handler/gateway_helper.go | 180 +++++
 backend/internal/handler/handler.go | 20 +-
 .../handler/openai_gateway_handler.go | 212 ++++++
 backend/internal/handler/wire.go | 24 +-
 backend/internal/model/account.go | 135 ++++
 backend/internal/pkg/oauth/oauth.go | 25 +-
 backend/internal/pkg/openai/constants.go | 42 ++
 backend/internal/pkg/openai/instructions.txt | 118 +++
 backend/internal/pkg/openai/oauth.go | 366 +++++++++
 backend/internal/pkg/openai/request.go | 18 +
 backend/internal/repository/account_repo.go | 32 +
 .../{claude_service.go => http_upstream.go} | 15 +-
 .../repository/openai_oauth_service.go | 92 +++
 backend/internal/repository/wire.go | 3 +-
 backend/internal/server/router.go | 17 +-
 .../internal/service/account_test_service.go | 237 +++++-
 backend/internal/service/gateway_service.go | 33 +-
 backend/internal/service/identity_service.go | 6 +-
 backend/internal/service/oauth_service.go | 5 +
 .../service/openai_gateway_service.go | 700 ++++++++++++++++++
 .../internal/service/openai_oauth_service.go | 257 +++++++
 backend/internal/service/ports/account.go | 2 +
 .../internal/service/ports/http_upstream.go | 9 +
 .../internal/service/ports/openai_oauth.go | 13 +
 backend/internal/service/service.go | 54 +-
 .../internal/service/token_refresh_service.go | 5 +-
 backend/internal/service/token_refresher.go | 51 ++
 backend/internal/service/wire.go | 5 +-
 .../components/account/AccountUsageCell.vue | 29 +-
 .../components/account/CreateAccountModal.vue | 395 +++++++---
 .../components/account/EditAccountModal.vue | 61 +-
 .../account/OAuthAuthorizationFlow.vue | 93 ++-
 .../components/account/ReAuthAccountModal.vue | 168 +++--
 .../src/components/common/GroupSelector.vue | 16 +-
 frontend/src/composables/useOpenAIOAuth.ts | 155 ++++
 frontend/src/i18n/locales/en.ts | 17 +
 frontend/src/i18n/locales/zh.ts | 17 +
 frontend/src/types/index.ts | 2 +-
 frontend/src/views/admin/AccountsView.vue | 10 +-
 frontend/src/views/admin/GroupsView.vue | 8 +-
 frontend/tsconfig.tsbuildinfo | 2 +-
 46 files changed, 3749 insertions(+), 477 deletions(-)
 create mode 100644 backend/internal/handler/admin/openai_oauth_handler.go
 create mode 100644 backend/internal/handler/gateway_helper.go
 create mode 100644 backend/internal/handler/openai_gateway_handler.go
 create mode 100644 backend/internal/pkg/openai/constants.go
 create mode 100644 backend/internal/pkg/openai/instructions.txt
 create mode 100644 backend/internal/pkg/openai/oauth.go
 create mode 100644 backend/internal/pkg/openai/request.go
 rename backend/internal/repository/{claude_service.go => http_upstream.go} (68%)
 create mode 100644 backend/internal/repository/openai_oauth_service.go
 create mode 100644 backend/internal/service/openai_gateway_service.go
 create mode 100644 backend/internal/service/openai_oauth_service.go
 create mode 100644 backend/internal/service/ports/http_upstream.go
 create mode 100644 backend/internal/service/ports/openai_oauth.go
 create mode 100644
frontend/src/composables/useOpenAIOAuth.ts diff --git a/backend/cmd/server/wire.go b/backend/cmd/server/wire.go index cd44061b..6abfe612 100644 --- a/backend/cmd/server/wire.go +++ b/backend/cmd/server/wire.go @@ -85,6 +85,14 @@ func provideCleanup( services.EmailQueue.Stop() return nil }}, + {"OAuthService", func() error { + services.OAuth.Stop() + return nil + }}, + {"OpenAIOAuthService", func() error { + services.OpenAIOAuth.Stop() + return nil + }}, {"Redis", func() error { return rdb.Close() }}, diff --git a/backend/cmd/server/wire_gen.go b/backend/cmd/server/wire_gen.go index 4fb085a7..8759839e 100644 --- a/backend/cmd/server/wire_gen.go +++ b/backend/cmd/server/wire_gen.go @@ -76,13 +76,16 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { groupHandler := admin.NewGroupHandler(adminService) claudeOAuthClient := repository.NewClaudeOAuthClient() oAuthService := service.NewOAuthService(proxyRepository, claudeOAuthClient) + openAIOAuthClient := repository.NewOpenAIOAuthClient() + openAIOAuthService := service.NewOpenAIOAuthService(proxyRepository, openAIOAuthClient) rateLimitService := service.NewRateLimitService(accountRepository, configConfig) claudeUsageFetcher := repository.NewClaudeUsageFetcher() accountUsageService := service.NewAccountUsageService(accountRepository, usageLogRepository, claudeUsageFetcher) - claudeUpstream := repository.NewClaudeUpstream(configConfig) - accountTestService := service.NewAccountTestService(accountRepository, oAuthService, claudeUpstream) - accountHandler := admin.NewAccountHandler(adminService, oAuthService, rateLimitService, accountUsageService, accountTestService) + httpUpstream := repository.NewHTTPUpstream(configConfig) + accountTestService := service.NewAccountTestService(accountRepository, oAuthService, openAIOAuthService, httpUpstream) + accountHandler := admin.NewAccountHandler(adminService, oAuthService, openAIOAuthService, rateLimitService, accountUsageService, accountTestService) oAuthHandler := admin.NewOAuthHandler(oAuthService) + openAIOAuthHandler := admin.NewOpenAIOAuthHandler(openAIOAuthService, adminService) proxyHandler := admin.NewProxyHandler(adminService) adminRedeemHandler := admin.NewRedeemHandler(adminService) settingHandler := admin.NewSettingHandler(settingService, emailService) @@ -93,7 +96,7 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { systemHandler := handler.ProvideSystemHandler(updateService) adminSubscriptionHandler := admin.NewSubscriptionHandler(subscriptionService) adminUsageHandler := admin.NewUsageHandler(usageLogRepository, apiKeyRepository, usageService, adminService) - adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, oAuthHandler, proxyHandler, adminRedeemHandler, settingHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler) + adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, oAuthHandler, openAIOAuthHandler, proxyHandler, adminRedeemHandler, settingHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler) gatewayCache := repository.NewGatewayCache(client) pricingRemoteClient := repository.NewPricingRemoteClient() pricingService, err := service.ProvidePricingService(configConfig, pricingRemoteClient) @@ -103,43 +106,47 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { billingService := service.NewBillingService(configConfig, pricingService) identityCache := 
repository.NewIdentityCache(client) identityService := service.NewIdentityService(identityCache) - gatewayService := service.NewGatewayService(accountRepository, usageLogRepository, userRepository, userSubscriptionRepository, gatewayCache, configConfig, billingService, rateLimitService, billingCacheService, identityService, claudeUpstream) + gatewayService := service.NewGatewayService(accountRepository, usageLogRepository, userRepository, userSubscriptionRepository, gatewayCache, configConfig, billingService, rateLimitService, billingCacheService, identityService, httpUpstream) concurrencyCache := repository.NewConcurrencyCache(client) concurrencyService := service.NewConcurrencyService(concurrencyCache) gatewayHandler := handler.NewGatewayHandler(gatewayService, userService, concurrencyService, billingCacheService) + openAIGatewayService := service.NewOpenAIGatewayService(accountRepository, usageLogRepository, userRepository, userSubscriptionRepository, gatewayCache, configConfig, billingService, rateLimitService, billingCacheService, httpUpstream) + openAIGatewayHandler := handler.NewOpenAIGatewayHandler(openAIGatewayService, userService, concurrencyService, billingCacheService) handlerSettingHandler := handler.ProvideSettingHandler(settingService, buildInfo) - handlers := handler.ProvideHandlers(authHandler, userHandler, apiKeyHandler, usageHandler, redeemHandler, subscriptionHandler, adminHandlers, gatewayHandler, handlerSettingHandler) + handlers := handler.ProvideHandlers(authHandler, userHandler, apiKeyHandler, usageHandler, redeemHandler, subscriptionHandler, adminHandlers, gatewayHandler, openAIGatewayHandler, handlerSettingHandler) groupService := service.NewGroupService(groupRepository) accountService := service.NewAccountService(accountRepository, groupRepository) proxyService := service.NewProxyService(proxyRepository) - tokenRefreshService := service.ProvideTokenRefreshService(accountRepository, oAuthService, configConfig) + tokenRefreshService := service.ProvideTokenRefreshService(accountRepository, oAuthService, openAIOAuthService, configConfig) services := &service.Services{ - Auth: authService, - User: userService, - ApiKey: apiKeyService, - Group: groupService, - Account: accountService, - Proxy: proxyService, - Redeem: redeemService, - Usage: usageService, - Pricing: pricingService, - Billing: billingService, - BillingCache: billingCacheService, - Admin: adminService, - Gateway: gatewayService, - OAuth: oAuthService, - RateLimit: rateLimitService, - AccountUsage: accountUsageService, - AccountTest: accountTestService, - Setting: settingService, - Email: emailService, - EmailQueue: emailQueueService, - Turnstile: turnstileService, - Subscription: subscriptionService, - Concurrency: concurrencyService, - Identity: identityService, - Update: updateService, - TokenRefresh: tokenRefreshService, + Auth: authService, + User: userService, + ApiKey: apiKeyService, + Group: groupService, + Account: accountService, + Proxy: proxyService, + Redeem: redeemService, + Usage: usageService, + Pricing: pricingService, + Billing: billingService, + BillingCache: billingCacheService, + Admin: adminService, + Gateway: gatewayService, + OpenAIGateway: openAIGatewayService, + OAuth: oAuthService, + OpenAIOAuth: openAIOAuthService, + RateLimit: rateLimitService, + AccountUsage: accountUsageService, + AccountTest: accountTestService, + Setting: settingService, + Email: emailService, + EmailQueue: emailQueueService, + Turnstile: turnstileService, + Subscription: subscriptionService, + 
Concurrency: concurrencyService, + Identity: identityService, + Update: updateService, + TokenRefresh: tokenRefreshService, } repositories := &repository.Repositories{ User: userRepository, @@ -201,6 +208,14 @@ func provideCleanup( services.EmailQueue.Stop() return nil }}, + {"OAuthService", func() error { + services.OAuth.Stop() + return nil + }}, + {"OpenAIOAuthService", func() error { + services.OpenAIOAuth.Stop() + return nil + }}, {"Redis", func() error { return rdb.Close() }}, diff --git a/backend/internal/handler/admin/account_handler.go b/backend/internal/handler/admin/account_handler.go index 4ab10362..3e92d2d4 100644 --- a/backend/internal/handler/admin/account_handler.go +++ b/backend/internal/handler/admin/account_handler.go @@ -4,6 +4,7 @@ import ( "strconv" "sub2api/internal/pkg/claude" + "sub2api/internal/pkg/openai" "sub2api/internal/pkg/response" "sub2api/internal/service" @@ -26,16 +27,18 @@ func NewOAuthHandler(oauthService *service.OAuthService) *OAuthHandler { type AccountHandler struct { adminService service.AdminService oauthService *service.OAuthService + openaiOAuthService *service.OpenAIOAuthService rateLimitService *service.RateLimitService accountUsageService *service.AccountUsageService accountTestService *service.AccountTestService } // NewAccountHandler creates a new admin account handler -func NewAccountHandler(adminService service.AdminService, oauthService *service.OAuthService, rateLimitService *service.RateLimitService, accountUsageService *service.AccountUsageService, accountTestService *service.AccountTestService) *AccountHandler { +func NewAccountHandler(adminService service.AdminService, oauthService *service.OAuthService, openaiOAuthService *service.OpenAIOAuthService, rateLimitService *service.RateLimitService, accountUsageService *service.AccountUsageService, accountTestService *service.AccountTestService) *AccountHandler { return &AccountHandler{ adminService: adminService, oauthService: oauthService, + openaiOAuthService: openaiOAuthService, rateLimitService: rateLimitService, accountUsageService: accountUsageService, accountTestService: accountTestService, @@ -232,26 +235,47 @@ func (h *AccountHandler) Refresh(c *gin.Context) { return } - // Use OAuth service to refresh token - tokenInfo, err := h.oauthService.RefreshAccountToken(c.Request.Context(), account) - if err != nil { - response.InternalError(c, "Failed to refresh credentials: "+err.Error()) - return - } + var newCredentials map[string]any - // Copy existing credentials to preserve non-token settings (e.g., intercept_warmup_requests) - newCredentials := make(map[string]any) - for k, v := range account.Credentials { - newCredentials[k] = v - } + if account.IsOpenAI() { + // Use OpenAI OAuth service to refresh token + tokenInfo, err := h.openaiOAuthService.RefreshAccountToken(c.Request.Context(), account) + if err != nil { + response.InternalError(c, "Failed to refresh credentials: "+err.Error()) + return + } - // Update token-related fields - newCredentials["access_token"] = tokenInfo.AccessToken - newCredentials["token_type"] = tokenInfo.TokenType - newCredentials["expires_in"] = tokenInfo.ExpiresIn - newCredentials["expires_at"] = tokenInfo.ExpiresAt - newCredentials["refresh_token"] = tokenInfo.RefreshToken - newCredentials["scope"] = tokenInfo.Scope + // Build new credentials from token info + newCredentials = h.openaiOAuthService.BuildAccountCredentials(tokenInfo) + + // Preserve non-token settings from existing credentials + for k, v := range account.Credentials { + if _, exists 
:= newCredentials[k]; !exists { + newCredentials[k] = v + } + } + } else { + // Use Anthropic/Claude OAuth service to refresh token + tokenInfo, err := h.oauthService.RefreshAccountToken(c.Request.Context(), account) + if err != nil { + response.InternalError(c, "Failed to refresh credentials: "+err.Error()) + return + } + + // Copy existing credentials to preserve non-token settings (e.g., intercept_warmup_requests) + newCredentials = make(map[string]any) + for k, v := range account.Credentials { + newCredentials[k] = v + } + + // Update token-related fields + newCredentials["access_token"] = tokenInfo.AccessToken + newCredentials["token_type"] = tokenInfo.TokenType + newCredentials["expires_in"] = tokenInfo.ExpiresIn + newCredentials["expires_at"] = tokenInfo.ExpiresAt + newCredentials["refresh_token"] = tokenInfo.RefreshToken + newCredentials["scope"] = tokenInfo.Scope + } updatedAccount, err := h.adminService.UpdateAccount(c.Request.Context(), accountID, &service.UpdateAccountInput{ Credentials: newCredentials, @@ -563,6 +587,46 @@ func (h *AccountHandler) GetAvailableModels(c *gin.Context) { return } + // Handle OpenAI accounts + if account.IsOpenAI() { + // For OAuth accounts: return default OpenAI models + if account.IsOAuth() { + response.Success(c, openai.DefaultModels) + return + } + + // For API Key accounts: check model_mapping + mapping := account.GetModelMapping() + if len(mapping) == 0 { + response.Success(c, openai.DefaultModels) + return + } + + // Return mapped models + var models []openai.Model + for requestedModel := range mapping { + var found bool + for _, dm := range openai.DefaultModels { + if dm.ID == requestedModel { + models = append(models, dm) + found = true + break + } + } + if !found { + models = append(models, openai.Model{ + ID: requestedModel, + Object: "model", + Type: "model", + DisplayName: requestedModel, + }) + } + } + response.Success(c, models) + return + } + + // Handle Claude/Anthropic accounts // For OAuth and Setup-Token accounts: return default models if account.IsOAuth() { response.Success(c, claude.DefaultModels) diff --git a/backend/internal/handler/admin/openai_oauth_handler.go b/backend/internal/handler/admin/openai_oauth_handler.go new file mode 100644 index 00000000..2cc6eee4 --- /dev/null +++ b/backend/internal/handler/admin/openai_oauth_handler.go @@ -0,0 +1,228 @@ +package admin + +import ( + "strconv" + + "sub2api/internal/pkg/response" + "sub2api/internal/service" + + "github.com/gin-gonic/gin" +) + +// OpenAIOAuthHandler handles OpenAI OAuth-related operations +type OpenAIOAuthHandler struct { + openaiOAuthService *service.OpenAIOAuthService + adminService service.AdminService +} + +// NewOpenAIOAuthHandler creates a new OpenAI OAuth handler +func NewOpenAIOAuthHandler(openaiOAuthService *service.OpenAIOAuthService, adminService service.AdminService) *OpenAIOAuthHandler { + return &OpenAIOAuthHandler{ + openaiOAuthService: openaiOAuthService, + adminService: adminService, + } +} + +// OpenAIGenerateAuthURLRequest represents the request for generating OpenAI auth URL +type OpenAIGenerateAuthURLRequest struct { + ProxyID *int64 `json:"proxy_id"` + RedirectURI string `json:"redirect_uri"` +} + +// GenerateAuthURL generates OpenAI OAuth authorization URL +// POST /api/v1/admin/openai/generate-auth-url +func (h *OpenAIOAuthHandler) GenerateAuthURL(c *gin.Context) { + var req OpenAIGenerateAuthURLRequest + if err := c.ShouldBindJSON(&req); err != nil { + // Allow empty body + req = OpenAIGenerateAuthURLRequest{} + } + + result, err := 
h.openaiOAuthService.GenerateAuthURL(c.Request.Context(), req.ProxyID, req.RedirectURI) + if err != nil { + response.InternalError(c, "Failed to generate auth URL: "+err.Error()) + return + } + + response.Success(c, result) +} + +// OpenAIExchangeCodeRequest represents the request for exchanging OpenAI auth code +type OpenAIExchangeCodeRequest struct { + SessionID string `json:"session_id" binding:"required"` + Code string `json:"code" binding:"required"` + RedirectURI string `json:"redirect_uri"` + ProxyID *int64 `json:"proxy_id"` +} + +// ExchangeCode exchanges OpenAI authorization code for tokens +// POST /api/v1/admin/openai/exchange-code +func (h *OpenAIOAuthHandler) ExchangeCode(c *gin.Context) { + var req OpenAIExchangeCodeRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + tokenInfo, err := h.openaiOAuthService.ExchangeCode(c.Request.Context(), &service.OpenAIExchangeCodeInput{ + SessionID: req.SessionID, + Code: req.Code, + RedirectURI: req.RedirectURI, + ProxyID: req.ProxyID, + }) + if err != nil { + response.BadRequest(c, "Failed to exchange code: "+err.Error()) + return + } + + response.Success(c, tokenInfo) +} + +// OpenAIRefreshTokenRequest represents the request for refreshing OpenAI token +type OpenAIRefreshTokenRequest struct { + RefreshToken string `json:"refresh_token" binding:"required"` + ProxyID *int64 `json:"proxy_id"` +} + +// RefreshToken refreshes an OpenAI OAuth token +// POST /api/v1/admin/openai/refresh-token +func (h *OpenAIOAuthHandler) RefreshToken(c *gin.Context) { + var req OpenAIRefreshTokenRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + var proxyURL string + if req.ProxyID != nil { + proxy, err := h.adminService.GetProxy(c.Request.Context(), *req.ProxyID) + if err == nil && proxy != nil { + proxyURL = proxy.URL() + } + } + + tokenInfo, err := h.openaiOAuthService.RefreshToken(c.Request.Context(), req.RefreshToken, proxyURL) + if err != nil { + response.BadRequest(c, "Failed to refresh token: "+err.Error()) + return + } + + response.Success(c, tokenInfo) +} + +// RefreshAccountToken refreshes token for a specific OpenAI account +// POST /api/v1/admin/openai/accounts/:id/refresh +func (h *OpenAIOAuthHandler) RefreshAccountToken(c *gin.Context) { + accountID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid account ID") + return + } + + // Get account + account, err := h.adminService.GetAccount(c.Request.Context(), accountID) + if err != nil { + response.NotFound(c, "Account not found") + return + } + + // Ensure account is OpenAI platform + if !account.IsOpenAI() { + response.BadRequest(c, "Account is not an OpenAI account") + return + } + + // Only refresh OAuth-based accounts + if !account.IsOAuth() { + response.BadRequest(c, "Cannot refresh non-OAuth account credentials") + return + } + + // Use OpenAI OAuth service to refresh token + tokenInfo, err := h.openaiOAuthService.RefreshAccountToken(c.Request.Context(), account) + if err != nil { + response.InternalError(c, "Failed to refresh credentials: "+err.Error()) + return + } + + // Build new credentials from token info + newCredentials := h.openaiOAuthService.BuildAccountCredentials(tokenInfo) + + // Preserve non-token settings from existing credentials + for k, v := range account.Credentials { + if _, exists := newCredentials[k]; !exists { + newCredentials[k] = v + } + } + + updatedAccount, 
err := h.adminService.UpdateAccount(c.Request.Context(), accountID, &service.UpdateAccountInput{ + Credentials: newCredentials, + }) + if err != nil { + response.InternalError(c, "Failed to update account credentials: "+err.Error()) + return + } + + response.Success(c, updatedAccount) +} + +// CreateAccountFromOAuth creates a new OpenAI OAuth account from token info +// POST /api/v1/admin/openai/create-from-oauth +func (h *OpenAIOAuthHandler) CreateAccountFromOAuth(c *gin.Context) { + var req struct { + SessionID string `json:"session_id" binding:"required"` + Code string `json:"code" binding:"required"` + RedirectURI string `json:"redirect_uri"` + ProxyID *int64 `json:"proxy_id"` + Name string `json:"name"` + Concurrency int `json:"concurrency"` + Priority int `json:"priority"` + GroupIDs []int64 `json:"group_ids"` + } + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + // Exchange code for tokens + tokenInfo, err := h.openaiOAuthService.ExchangeCode(c.Request.Context(), &service.OpenAIExchangeCodeInput{ + SessionID: req.SessionID, + Code: req.Code, + RedirectURI: req.RedirectURI, + ProxyID: req.ProxyID, + }) + if err != nil { + response.BadRequest(c, "Failed to exchange code: "+err.Error()) + return + } + + // Build credentials from token info + credentials := h.openaiOAuthService.BuildAccountCredentials(tokenInfo) + + // Use email as default name if not provided + name := req.Name + if name == "" && tokenInfo.Email != "" { + name = tokenInfo.Email + } + if name == "" { + name = "OpenAI OAuth Account" + } + + // Create account + account, err := h.adminService.CreateAccount(c.Request.Context(), &service.CreateAccountInput{ + Name: name, + Platform: "openai", + Type: "oauth", + Credentials: credentials, + ProxyID: req.ProxyID, + Concurrency: req.Concurrency, + Priority: req.Priority, + GroupIDs: req.GroupIDs, + }) + if err != nil { + response.InternalError(c, "Failed to create account: "+err.Error()) + return + } + + response.Success(c, account) +} diff --git a/backend/internal/handler/gateway_handler.go b/backend/internal/handler/gateway_handler.go index 9aa4f53d..ce640e86 100644 --- a/backend/internal/handler/gateway_handler.go +++ b/backend/internal/handler/gateway_handler.go @@ -13,24 +13,18 @@ import ( "sub2api/internal/middleware" "sub2api/internal/model" "sub2api/internal/pkg/claude" + "sub2api/internal/pkg/openai" "sub2api/internal/service" "github.com/gin-gonic/gin" ) -const ( - // Maximum wait time for concurrency slot - maxConcurrencyWait = 60 * time.Second - // Ping interval during wait - pingInterval = 5 * time.Second -) - // GatewayHandler handles API gateway requests type GatewayHandler struct { gatewayService *service.GatewayService userService *service.UserService - concurrencyService *service.ConcurrencyService billingCacheService *service.BillingCacheService + concurrencyHelper *ConcurrencyHelper } // NewGatewayHandler creates a new GatewayHandler @@ -38,8 +32,8 @@ func NewGatewayHandler(gatewayService *service.GatewayService, userService *serv return &GatewayHandler{ gatewayService: gatewayService, userService: userService, - concurrencyService: concurrencyService, billingCacheService: billingCacheService, + concurrencyHelper: NewConcurrencyHelper(concurrencyService, SSEPingFormatClaude), } } @@ -89,7 +83,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { // 0. 
检查wait队列是否已满 maxWait := service.CalculateMaxWait(user.Concurrency) - canWait, err := h.concurrencyService.IncrementWaitCount(c.Request.Context(), user.ID, maxWait) + canWait, err := h.concurrencyHelper.IncrementWaitCount(c.Request.Context(), user.ID, maxWait) if err != nil { log.Printf("Increment wait count failed: %v", err) // On error, allow request to proceed @@ -98,10 +92,10 @@ func (h *GatewayHandler) Messages(c *gin.Context) { return } // 确保在函数退出时减少wait计数 - defer h.concurrencyService.DecrementWaitCount(c.Request.Context(), user.ID) + defer h.concurrencyHelper.DecrementWaitCount(c.Request.Context(), user.ID) // 1. 首先获取用户并发槽位 - userReleaseFunc, err := h.acquireUserSlotWithWait(c, user, req.Stream, &streamStarted) + userReleaseFunc, err := h.concurrencyHelper.AcquireUserSlotWithWait(c, user, req.Stream, &streamStarted) if err != nil { log.Printf("User concurrency acquire failed: %v", err) h.handleConcurrencyError(c, err, "user", streamStarted) @@ -139,7 +133,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { } // 3. 获取账号并发槽位 - accountReleaseFunc, err := h.acquireAccountSlotWithWait(c, account, req.Stream, &streamStarted) + accountReleaseFunc, err := h.concurrencyHelper.AcquireAccountSlotWithWait(c, account, req.Stream, &streamStarted) if err != nil { log.Printf("Account concurrency acquire failed: %v", err) h.handleConcurrencyError(c, err, "account", streamStarted) @@ -173,135 +167,25 @@ func (h *GatewayHandler) Messages(c *gin.Context) { }() } -// acquireUserSlotWithWait acquires a user concurrency slot, waiting if necessary -// For streaming requests, sends ping events during the wait -// streamStarted is updated if streaming response has begun -func (h *GatewayHandler) acquireUserSlotWithWait(c *gin.Context, user *model.User, isStream bool, streamStarted *bool) (func(), error) { - ctx := c.Request.Context() - - // Try to acquire immediately - result, err := h.concurrencyService.AcquireUserSlot(ctx, user.ID, user.Concurrency) - if err != nil { - return nil, err - } - - if result.Acquired { - return result.ReleaseFunc, nil - } - - // Need to wait - handle streaming ping if needed - return h.waitForSlotWithPing(c, "user", user.ID, user.Concurrency, isStream, streamStarted) -} - -// acquireAccountSlotWithWait acquires an account concurrency slot, waiting if necessary -// For streaming requests, sends ping events during the wait -// streamStarted is updated if streaming response has begun -func (h *GatewayHandler) acquireAccountSlotWithWait(c *gin.Context, account *model.Account, isStream bool, streamStarted *bool) (func(), error) { - ctx := c.Request.Context() - - // Try to acquire immediately - result, err := h.concurrencyService.AcquireAccountSlot(ctx, account.ID, account.Concurrency) - if err != nil { - return nil, err - } - - if result.Acquired { - return result.ReleaseFunc, nil - } - - // Need to wait - handle streaming ping if needed - return h.waitForSlotWithPing(c, "account", account.ID, account.Concurrency, isStream, streamStarted) -} - -// concurrencyError represents a concurrency limit error with context -type concurrencyError struct { - SlotType string - IsTimeout bool -} - -func (e *concurrencyError) Error() string { - if e.IsTimeout { - return fmt.Sprintf("timeout waiting for %s concurrency slot", e.SlotType) - } - return fmt.Sprintf("%s concurrency limit reached", e.SlotType) -} - -// waitForSlotWithPing waits for a concurrency slot, sending ping events for streaming requests -// Note: For streaming requests, we send ping to keep the connection alive. 
-// streamStarted pointer is updated when streaming begins (for proper error handling by caller) -func (h *GatewayHandler) waitForSlotWithPing(c *gin.Context, slotType string, id int64, maxConcurrency int, isStream bool, streamStarted *bool) (func(), error) { - ctx, cancel := context.WithTimeout(c.Request.Context(), maxConcurrencyWait) - defer cancel() - - // For streaming requests, set up SSE headers for ping - var flusher http.Flusher - if isStream { - var ok bool - flusher, ok = c.Writer.(http.Flusher) - if !ok { - return nil, fmt.Errorf("streaming not supported") - } - } - - pingTicker := time.NewTicker(pingInterval) - defer pingTicker.Stop() - - pollTicker := time.NewTicker(100 * time.Millisecond) - defer pollTicker.Stop() - - for { - select { - case <-ctx.Done(): - return nil, &concurrencyError{ - SlotType: slotType, - IsTimeout: true, - } - - case <-pingTicker.C: - // Send ping for streaming requests to keep connection alive - if isStream && flusher != nil { - // Set headers on first ping (lazy initialization) - if !*streamStarted { - c.Header("Content-Type", "text/event-stream") - c.Header("Cache-Control", "no-cache") - c.Header("Connection", "keep-alive") - c.Header("X-Accel-Buffering", "no") - *streamStarted = true - } - if _, err := fmt.Fprintf(c.Writer, "data: {\"type\": \"ping\"}\n\n"); err != nil { - return nil, err - } - flusher.Flush() - } - - case <-pollTicker.C: - // Try to acquire slot - var result *service.AcquireResult - var err error - - if slotType == "user" { - result, err = h.concurrencyService.AcquireUserSlot(ctx, id, maxConcurrency) - } else { - result, err = h.concurrencyService.AcquireAccountSlot(ctx, id, maxConcurrency) - } - - if err != nil { - return nil, err - } - - if result.Acquired { - return result.ReleaseFunc, nil - } - } - } -} - // Models handles listing available models // GET /v1/models +// Returns different model lists based on the API key's group platform func (h *GatewayHandler) Models(c *gin.Context) { + apiKey, _ := middleware.GetApiKeyFromContext(c) + + // Return OpenAI models for OpenAI platform groups + if apiKey != nil && apiKey.Group != nil && apiKey.Group.Platform == "openai" { + c.JSON(http.StatusOK, gin.H{ + "object": "list", + "data": openai.DefaultModels, + }) + return + } + + // Default: Claude models c.JSON(http.StatusOK, gin.H{ - "data": claude.DefaultModels, "object": "list", + "data": claude.DefaultModels, }) } diff --git a/backend/internal/handler/gateway_helper.go b/backend/internal/handler/gateway_helper.go new file mode 100644 index 00000000..3e69b9eb --- /dev/null +++ b/backend/internal/handler/gateway_helper.go @@ -0,0 +1,180 @@ +package handler + +import ( + "context" + "fmt" + "net/http" + "time" + + "sub2api/internal/model" + "sub2api/internal/service" + + "github.com/gin-gonic/gin" +) + +const ( + // maxConcurrencyWait is the maximum time to wait for a concurrency slot + maxConcurrencyWait = 30 * time.Second + // pingInterval is the interval for sending ping events during slot wait + pingInterval = 15 * time.Second +) + +// SSEPingFormat defines the format of SSE ping events for different platforms +type SSEPingFormat string + +const ( + // SSEPingFormatClaude is the Claude/Anthropic SSE ping format + SSEPingFormatClaude SSEPingFormat = "data: {\"type\": \"ping\"}\n\n" + // SSEPingFormatNone indicates no ping should be sent (e.g., OpenAI has no ping spec) + SSEPingFormatNone SSEPingFormat = "" +) + +// ConcurrencyError represents a concurrency limit error with context +type ConcurrencyError struct { + SlotType string + 
IsTimeout bool +} + +func (e *ConcurrencyError) Error() string { + if e.IsTimeout { + return fmt.Sprintf("timeout waiting for %s concurrency slot", e.SlotType) + } + return fmt.Sprintf("%s concurrency limit reached", e.SlotType) +} + +// ConcurrencyHelper provides common concurrency slot management for gateway handlers +type ConcurrencyHelper struct { + concurrencyService *service.ConcurrencyService + pingFormat SSEPingFormat +} + +// NewConcurrencyHelper creates a new ConcurrencyHelper +func NewConcurrencyHelper(concurrencyService *service.ConcurrencyService, pingFormat SSEPingFormat) *ConcurrencyHelper { + return &ConcurrencyHelper{ + concurrencyService: concurrencyService, + pingFormat: pingFormat, + } +} + +// IncrementWaitCount increments the wait count for a user +func (h *ConcurrencyHelper) IncrementWaitCount(ctx context.Context, userID int64, maxWait int) (bool, error) { + return h.concurrencyService.IncrementWaitCount(ctx, userID, maxWait) +} + +// DecrementWaitCount decrements the wait count for a user +func (h *ConcurrencyHelper) DecrementWaitCount(ctx context.Context, userID int64) { + h.concurrencyService.DecrementWaitCount(ctx, userID) +} + +// AcquireUserSlotWithWait acquires a user concurrency slot, waiting if necessary. +// For streaming requests, sends ping events during the wait. +// streamStarted is updated if streaming response has begun. +func (h *ConcurrencyHelper) AcquireUserSlotWithWait(c *gin.Context, user *model.User, isStream bool, streamStarted *bool) (func(), error) { + ctx := c.Request.Context() + + // Try to acquire immediately + result, err := h.concurrencyService.AcquireUserSlot(ctx, user.ID, user.Concurrency) + if err != nil { + return nil, err + } + + if result.Acquired { + return result.ReleaseFunc, nil + } + + // Need to wait - handle streaming ping if needed + return h.waitForSlotWithPing(c, "user", user.ID, user.Concurrency, isStream, streamStarted) +} + +// AcquireAccountSlotWithWait acquires an account concurrency slot, waiting if necessary. +// For streaming requests, sends ping events during the wait. +// streamStarted is updated if streaming response has begun. +func (h *ConcurrencyHelper) AcquireAccountSlotWithWait(c *gin.Context, account *model.Account, isStream bool, streamStarted *bool) (func(), error) { + ctx := c.Request.Context() + + // Try to acquire immediately + result, err := h.concurrencyService.AcquireAccountSlot(ctx, account.ID, account.Concurrency) + if err != nil { + return nil, err + } + + if result.Acquired { + return result.ReleaseFunc, nil + } + + // Need to wait - handle streaming ping if needed + return h.waitForSlotWithPing(c, "account", account.ID, account.Concurrency, isStream, streamStarted) +} + +// waitForSlotWithPing waits for a concurrency slot, sending ping events for streaming requests. +// streamStarted pointer is updated when streaming begins (for proper error handling by caller). 
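+// When pingFormat is SSEPingFormatNone (or the request is not streaming), no ping ticker is
+// created and pingCh stays nil, so the ping branch of the select below never fires; only the
+// poll ticker and the context timeout drive the wait.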
+func (h *ConcurrencyHelper) waitForSlotWithPing(c *gin.Context, slotType string, id int64, maxConcurrency int, isStream bool, streamStarted *bool) (func(), error) { + ctx, cancel := context.WithTimeout(c.Request.Context(), maxConcurrencyWait) + defer cancel() + + // Determine if ping is needed (streaming + ping format defined) + needPing := isStream && h.pingFormat != "" + + var flusher http.Flusher + if needPing { + var ok bool + flusher, ok = c.Writer.(http.Flusher) + if !ok { + return nil, fmt.Errorf("streaming not supported") + } + } + + // Only create ping ticker if ping is needed + var pingCh <-chan time.Time + if needPing { + pingTicker := time.NewTicker(pingInterval) + defer pingTicker.Stop() + pingCh = pingTicker.C + } + + pollTicker := time.NewTicker(100 * time.Millisecond) + defer pollTicker.Stop() + + for { + select { + case <-ctx.Done(): + return nil, &ConcurrencyError{ + SlotType: slotType, + IsTimeout: true, + } + + case <-pingCh: + // Send ping to keep connection alive + if !*streamStarted { + c.Header("Content-Type", "text/event-stream") + c.Header("Cache-Control", "no-cache") + c.Header("Connection", "keep-alive") + c.Header("X-Accel-Buffering", "no") + *streamStarted = true + } + if _, err := fmt.Fprint(c.Writer, string(h.pingFormat)); err != nil { + return nil, err + } + flusher.Flush() + + case <-pollTicker.C: + // Try to acquire slot + var result *service.AcquireResult + var err error + + if slotType == "user" { + result, err = h.concurrencyService.AcquireUserSlot(ctx, id, maxConcurrency) + } else { + result, err = h.concurrencyService.AcquireAccountSlot(ctx, id, maxConcurrency) + } + + if err != nil { + return nil, err + } + + if result.Acquired { + return result.ReleaseFunc, nil + } + } + } +} diff --git a/backend/internal/handler/handler.go b/backend/internal/handler/handler.go index 830bd5e8..5b106a98 100644 --- a/backend/internal/handler/handler.go +++ b/backend/internal/handler/handler.go @@ -11,6 +11,7 @@ type AdminHandlers struct { Group *admin.GroupHandler Account *admin.AccountHandler OAuth *admin.OAuthHandler + OpenAIOAuth *admin.OpenAIOAuthHandler Proxy *admin.ProxyHandler Redeem *admin.RedeemHandler Setting *admin.SettingHandler @@ -21,15 +22,16 @@ type AdminHandlers struct { // Handlers contains all HTTP handlers type Handlers struct { - Auth *AuthHandler - User *UserHandler - APIKey *APIKeyHandler - Usage *UsageHandler - Redeem *RedeemHandler - Subscription *SubscriptionHandler - Admin *AdminHandlers - Gateway *GatewayHandler - Setting *SettingHandler + Auth *AuthHandler + User *UserHandler + APIKey *APIKeyHandler + Usage *UsageHandler + Redeem *RedeemHandler + Subscription *SubscriptionHandler + Admin *AdminHandlers + Gateway *GatewayHandler + OpenAIGateway *OpenAIGatewayHandler + Setting *SettingHandler } // BuildInfo contains build-time information diff --git a/backend/internal/handler/openai_gateway_handler.go b/backend/internal/handler/openai_gateway_handler.go new file mode 100644 index 00000000..ce34a5cf --- /dev/null +++ b/backend/internal/handler/openai_gateway_handler.go @@ -0,0 +1,212 @@ +package handler + +import ( + "context" + "encoding/json" + "fmt" + "io" + "log" + "net/http" + "time" + + "sub2api/internal/middleware" + "sub2api/internal/pkg/openai" + "sub2api/internal/service" + + "github.com/gin-gonic/gin" +) + +// OpenAIGatewayHandler handles OpenAI API gateway requests +type OpenAIGatewayHandler struct { + gatewayService *service.OpenAIGatewayService + userService *service.UserService + billingCacheService 
*service.BillingCacheService + concurrencyHelper *ConcurrencyHelper +} + +// NewOpenAIGatewayHandler creates a new OpenAIGatewayHandler +func NewOpenAIGatewayHandler( + gatewayService *service.OpenAIGatewayService, + userService *service.UserService, + concurrencyService *service.ConcurrencyService, + billingCacheService *service.BillingCacheService, +) *OpenAIGatewayHandler { + return &OpenAIGatewayHandler{ + gatewayService: gatewayService, + userService: userService, + billingCacheService: billingCacheService, + concurrencyHelper: NewConcurrencyHelper(concurrencyService, SSEPingFormatNone), + } +} + +// Responses handles OpenAI Responses API endpoint +// POST /openai/v1/responses +func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { + // Get apiKey and user from context (set by ApiKeyAuth middleware) + apiKey, ok := middleware.GetApiKeyFromContext(c) + if !ok { + h.errorResponse(c, http.StatusUnauthorized, "authentication_error", "Invalid API key") + return + } + + user, ok := middleware.GetUserFromContext(c) + if !ok { + h.errorResponse(c, http.StatusInternalServerError, "api_error", "User context not found") + return + } + + // Read request body + body, err := io.ReadAll(c.Request.Body) + if err != nil { + h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "Failed to read request body") + return + } + + if len(body) == 0 { + h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "Request body is empty") + return + } + + // Parse request body to map for potential modification + var reqBody map[string]any + if err := json.Unmarshal(body, &reqBody); err != nil { + h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "Failed to parse request body") + return + } + + // Extract model and stream + reqModel, _ := reqBody["model"].(string) + reqStream, _ := reqBody["stream"].(bool) + + // For non-Codex CLI requests, set default instructions + userAgent := c.GetHeader("User-Agent") + if !openai.IsCodexCLIRequest(userAgent) { + reqBody["instructions"] = openai.DefaultInstructions + // Re-serialize body + body, err = json.Marshal(reqBody) + if err != nil { + h.errorResponse(c, http.StatusInternalServerError, "api_error", "Failed to process request") + return + } + } + + // Track if we've started streaming (for error handling) + streamStarted := false + + // Get subscription info (may be nil) + subscription, _ := middleware.GetSubscriptionFromContext(c) + + // 0. Check if wait queue is full + maxWait := service.CalculateMaxWait(user.Concurrency) + canWait, err := h.concurrencyHelper.IncrementWaitCount(c.Request.Context(), user.ID, maxWait) + if err != nil { + log.Printf("Increment wait count failed: %v", err) + // On error, allow request to proceed + } else if !canWait { + h.errorResponse(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later") + return + } + // Ensure wait count is decremented when function exits + defer h.concurrencyHelper.DecrementWaitCount(c.Request.Context(), user.ID) + + // 1. First acquire user concurrency slot + userReleaseFunc, err := h.concurrencyHelper.AcquireUserSlotWithWait(c, user, reqStream, &streamStarted) + if err != nil { + log.Printf("User concurrency acquire failed: %v", err) + h.handleConcurrencyError(c, err, "user", streamStarted) + return + } + if userReleaseFunc != nil { + defer userReleaseFunc() + } + + // 2. 
Re-check billing eligibility after wait + if err := h.billingCacheService.CheckBillingEligibility(c.Request.Context(), user, apiKey, apiKey.Group, subscription); err != nil { + log.Printf("Billing eligibility check failed after wait: %v", err) + h.handleStreamingAwareError(c, http.StatusForbidden, "billing_error", err.Error(), streamStarted) + return + } + + // Generate session hash (from header for OpenAI) + sessionHash := h.gatewayService.GenerateSessionHash(c) + + // Select account supporting the requested model + log.Printf("[OpenAI Handler] Selecting account: groupID=%v model=%s", apiKey.GroupID, reqModel) + account, err := h.gatewayService.SelectAccountForModel(c.Request.Context(), apiKey.GroupID, sessionHash, reqModel) + if err != nil { + log.Printf("[OpenAI Handler] SelectAccount failed: %v", err) + h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts: "+err.Error(), streamStarted) + return + } + log.Printf("[OpenAI Handler] Selected account: id=%d name=%s", account.ID, account.Name) + + // 3. Acquire account concurrency slot + accountReleaseFunc, err := h.concurrencyHelper.AcquireAccountSlotWithWait(c, account, reqStream, &streamStarted) + if err != nil { + log.Printf("Account concurrency acquire failed: %v", err) + h.handleConcurrencyError(c, err, "account", streamStarted) + return + } + if accountReleaseFunc != nil { + defer accountReleaseFunc() + } + + // Forward request + result, err := h.gatewayService.Forward(c.Request.Context(), c, account, body) + if err != nil { + // Error response already handled in Forward, just log + log.Printf("Forward request failed: %v", err) + return + } + + // Async record usage + go func() { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + if err := h.gatewayService.RecordUsage(ctx, &service.OpenAIRecordUsageInput{ + Result: result, + ApiKey: apiKey, + User: user, + Account: account, + Subscription: subscription, + }); err != nil { + log.Printf("Record usage failed: %v", err) + } + }() +} + +// handleConcurrencyError handles concurrency-related errors with proper 429 response +func (h *OpenAIGatewayHandler) handleConcurrencyError(c *gin.Context, err error, slotType string, streamStarted bool) { + h.handleStreamingAwareError(c, http.StatusTooManyRequests, "rate_limit_error", + fmt.Sprintf("Concurrency limit exceeded for %s, please retry later", slotType), streamStarted) +} + +// handleStreamingAwareError handles errors that may occur after streaming has started +func (h *OpenAIGatewayHandler) handleStreamingAwareError(c *gin.Context, status int, errType, message string, streamStarted bool) { + if streamStarted { + // Stream already started, send error as SSE event then close + flusher, ok := c.Writer.(http.Flusher) + if ok { + // Send error event in OpenAI SSE format + errorEvent := fmt.Sprintf(`event: error`+"\n"+`data: {"error": {"type": "%s", "message": "%s"}}`+"\n\n", errType, message) + if _, err := fmt.Fprint(c.Writer, errorEvent); err != nil { + _ = c.Error(err) + } + flusher.Flush() + } + return + } + + // Normal case: return JSON response with proper status code + h.errorResponse(c, status, errType, message) +} + +// errorResponse returns OpenAI API format error response +func (h *OpenAIGatewayHandler) errorResponse(c *gin.Context, status int, errType, message string) { + c.JSON(status, gin.H{ + "error": gin.H{ + "type": errType, + "message": message, + }, + }) +} diff --git a/backend/internal/handler/wire.go b/backend/internal/handler/wire.go index 
435fbd0c..bf109dbf 100644 --- a/backend/internal/handler/wire.go +++ b/backend/internal/handler/wire.go @@ -14,6 +14,7 @@ func ProvideAdminHandlers( groupHandler *admin.GroupHandler, accountHandler *admin.AccountHandler, oauthHandler *admin.OAuthHandler, + openaiOAuthHandler *admin.OpenAIOAuthHandler, proxyHandler *admin.ProxyHandler, redeemHandler *admin.RedeemHandler, settingHandler *admin.SettingHandler, @@ -27,6 +28,7 @@ func ProvideAdminHandlers( Group: groupHandler, Account: accountHandler, OAuth: oauthHandler, + OpenAIOAuth: openaiOAuthHandler, Proxy: proxyHandler, Redeem: redeemHandler, Setting: settingHandler, @@ -56,18 +58,20 @@ func ProvideHandlers( subscriptionHandler *SubscriptionHandler, adminHandlers *AdminHandlers, gatewayHandler *GatewayHandler, + openaiGatewayHandler *OpenAIGatewayHandler, settingHandler *SettingHandler, ) *Handlers { return &Handlers{ - Auth: authHandler, - User: userHandler, - APIKey: apiKeyHandler, - Usage: usageHandler, - Redeem: redeemHandler, - Subscription: subscriptionHandler, - Admin: adminHandlers, - Gateway: gatewayHandler, - Setting: settingHandler, + Auth: authHandler, + User: userHandler, + APIKey: apiKeyHandler, + Usage: usageHandler, + Redeem: redeemHandler, + Subscription: subscriptionHandler, + Admin: adminHandlers, + Gateway: gatewayHandler, + OpenAIGateway: openaiGatewayHandler, + Setting: settingHandler, } } @@ -81,6 +85,7 @@ var ProviderSet = wire.NewSet( NewRedeemHandler, NewSubscriptionHandler, NewGatewayHandler, + NewOpenAIGatewayHandler, ProvideSettingHandler, // Admin handlers @@ -89,6 +94,7 @@ var ProviderSet = wire.NewSet( admin.NewGroupHandler, admin.NewAccountHandler, admin.NewOAuthHandler, + admin.NewOpenAIOAuthHandler, admin.NewProxyHandler, admin.NewRedeemHandler, admin.NewSettingHandler, diff --git a/backend/internal/model/account.go b/backend/internal/model/account.go index 2bb7079f..28e96ef9 100644 --- a/backend/internal/model/account.go +++ b/backend/internal/model/account.go @@ -277,3 +277,138 @@ func (a *Account) IsInterceptWarmupEnabled() bool { } return false } + +// =============== OpenAI 相关方法 =============== + +// IsOpenAI 检查是否为 OpenAI 平台账号 +func (a *Account) IsOpenAI() bool { + return a.Platform == PlatformOpenAI +} + +// IsAnthropic 检查是否为 Anthropic 平台账号 +func (a *Account) IsAnthropic() bool { + return a.Platform == PlatformAnthropic +} + +// IsOpenAIOAuth 检查是否为 OpenAI OAuth 类型账号 +func (a *Account) IsOpenAIOAuth() bool { + return a.IsOpenAI() && a.Type == AccountTypeOAuth +} + +// IsOpenAIApiKey 检查是否为 OpenAI API Key 类型账号(Response 账号) +func (a *Account) IsOpenAIApiKey() bool { + return a.IsOpenAI() && a.Type == AccountTypeApiKey +} + +// GetOpenAIBaseURL 获取 OpenAI API 基础 URL +// 对于 API Key 类型账号,从 credentials 中获取 base_url +// 对于 OAuth 类型账号,返回默认的 OpenAI API URL +func (a *Account) GetOpenAIBaseURL() string { + if !a.IsOpenAI() { + return "" + } + if a.Type == AccountTypeApiKey { + baseURL := a.GetCredential("base_url") + if baseURL != "" { + return baseURL + } + } + return "https://api.openai.com" // OpenAI 默认 API URL +} + +// GetOpenAIAccessToken 获取 OpenAI 访问令牌 +func (a *Account) GetOpenAIAccessToken() string { + if !a.IsOpenAI() { + return "" + } + return a.GetCredential("access_token") +} + +// GetOpenAIRefreshToken 获取 OpenAI 刷新令牌 +func (a *Account) GetOpenAIRefreshToken() string { + if !a.IsOpenAIOAuth() { + return "" + } + return a.GetCredential("refresh_token") +} + +// GetOpenAIIDToken 获取 OpenAI ID Token(JWT,包含用户信息) +func (a *Account) GetOpenAIIDToken() string { + if !a.IsOpenAIOAuth() { + return "" + } + 
return a.GetCredential("id_token") +} + +// GetOpenAIApiKey 获取 OpenAI API Key(用于 Response 账号) +func (a *Account) GetOpenAIApiKey() string { + if !a.IsOpenAIApiKey() { + return "" + } + return a.GetCredential("api_key") +} + +// GetOpenAIUserAgent 获取 OpenAI 自定义 User-Agent +// 返回空字符串表示透传原始 User-Agent +func (a *Account) GetOpenAIUserAgent() string { + if !a.IsOpenAI() { + return "" + } + return a.GetCredential("user_agent") +} + +// GetChatGPTAccountID 获取 ChatGPT 账号 ID(从 ID Token 解析) +func (a *Account) GetChatGPTAccountID() string { + if !a.IsOpenAIOAuth() { + return "" + } + return a.GetCredential("chatgpt_account_id") +} + +// GetChatGPTUserID 获取 ChatGPT 用户 ID(从 ID Token 解析) +func (a *Account) GetChatGPTUserID() string { + if !a.IsOpenAIOAuth() { + return "" + } + return a.GetCredential("chatgpt_user_id") +} + +// GetOpenAIOrganizationID 获取 OpenAI 组织 ID +func (a *Account) GetOpenAIOrganizationID() string { + if !a.IsOpenAIOAuth() { + return "" + } + return a.GetCredential("organization_id") +} + +// GetOpenAITokenExpiresAt 获取 OpenAI Token 过期时间 +func (a *Account) GetOpenAITokenExpiresAt() *time.Time { + if !a.IsOpenAIOAuth() { + return nil + } + expiresAtStr := a.GetCredential("expires_at") + if expiresAtStr == "" { + return nil + } + // 尝试解析时间 + t, err := time.Parse(time.RFC3339, expiresAtStr) + if err != nil { + // 尝试解析为 Unix 时间戳 + if v, ok := a.Credentials["expires_at"].(float64); ok { + t = time.Unix(int64(v), 0) + return &t + } + return nil + } + return &t +} + +// IsOpenAITokenExpired 检查 OpenAI Token 是否过期 +func (a *Account) IsOpenAITokenExpired() bool { + expiresAt := a.GetOpenAITokenExpiresAt() + if expiresAt == nil { + return false // 没有过期时间信息,假设未过期 + } + // 提前 60 秒认为过期,便于刷新 + return time.Now().Add(60 * time.Second).After(*expiresAt) +} diff --git a/backend/internal/pkg/oauth/oauth.go b/backend/internal/pkg/oauth/oauth.go index 6e774e6d..22dbff3f 100644 --- a/backend/internal/pkg/oauth/oauth.go +++ b/backend/internal/pkg/oauth/oauth.go @@ -43,18 +43,25 @@ type OAuthSession struct { type SessionStore struct { mu sync.RWMutex sessions map[string]*OAuthSession + stopCh chan struct{} } // NewSessionStore creates a new session store func NewSessionStore() *SessionStore { store := &SessionStore{ sessions: make(map[string]*OAuthSession), + stopCh: make(chan struct{}), } // Start cleanup goroutine go store.cleanup() return store } +// Stop stops the cleanup goroutine +func (s *SessionStore) Stop() { + close(s.stopCh) +} + // Set stores a session func (s *SessionStore) Set(sessionID string, session *OAuthSession) { s.mu.Lock() @@ -87,14 +94,20 @@ func (s *SessionStore) Delete(sessionID string) { // cleanup removes expired sessions periodically func (s *SessionStore) cleanup() { ticker := time.NewTicker(5 * time.Minute) - for range ticker.C { - s.mu.Lock() - for id, session := range s.sessions { - if time.Since(session.CreatedAt) > SessionTTL { - delete(s.sessions, id) + defer ticker.Stop() + for { + select { + case <-s.stopCh: + return + case <-ticker.C: + s.mu.Lock() + for id, session := range s.sessions { + if time.Since(session.CreatedAt) > SessionTTL { + delete(s.sessions, id) + } } + s.mu.Unlock() } - s.mu.Unlock() } } diff --git a/backend/internal/pkg/openai/constants.go b/backend/internal/pkg/openai/constants.go new file mode 100644 index 00000000..d97507a8 --- /dev/null +++ b/backend/internal/pkg/openai/constants.go @@ -0,0 +1,42 @@ +package openai + +import _ "embed" + +// Model represents an OpenAI model +type Model struct { + ID string `json:"id"` + Object string `json:"object"` + 
Created int64 `json:"created"` + OwnedBy string `json:"owned_by"` + Type string `json:"type"` + DisplayName string `json:"display_name"` +} + +// DefaultModels OpenAI models list +var DefaultModels = []Model{ + {ID: "gpt-5.2", Object: "model", Created: 1733875200, OwnedBy: "openai", Type: "model", DisplayName: "GPT-5.2"}, + {ID: "gpt-5.2-codex", Object: "model", Created: 1733011200, OwnedBy: "openai", Type: "model", DisplayName: "GPT-5.2 Codex"}, + {ID: "gpt-5.1-codex-max", Object: "model", Created: 1730419200, OwnedBy: "openai", Type: "model", DisplayName: "GPT-5.1 Codex Max"}, + {ID: "gpt-5.1-codex", Object: "model", Created: 1730419200, OwnedBy: "openai", Type: "model", DisplayName: "GPT-5.1 Codex"}, + {ID: "gpt-5.1", Object: "model", Created: 1731456000, OwnedBy: "openai", Type: "model", DisplayName: "GPT-5.1"}, + {ID: "gpt-5.1-codex-mini", Object: "model", Created: 1730419200, OwnedBy: "openai", Type: "model", DisplayName: "GPT-5.1 Codex Mini"}, + {ID: "gpt-5", Object: "model", Created: 1722988800, OwnedBy: "openai", Type: "model", DisplayName: "GPT-5"}, +} + +// DefaultModelIDs returns the default model ID list +func DefaultModelIDs() []string { + ids := make([]string, len(DefaultModels)) + for i, m := range DefaultModels { + ids[i] = m.ID + } + return ids +} + +// DefaultTestModel default model for testing OpenAI accounts +const DefaultTestModel = "gpt-5.1-codex" + +// DefaultInstructions default instructions for non-Codex CLI requests +// Content loaded from instructions.txt at compile time +// +//go:embed instructions.txt +var DefaultInstructions string diff --git a/backend/internal/pkg/openai/instructions.txt b/backend/internal/pkg/openai/instructions.txt new file mode 100644 index 00000000..431f0f84 --- /dev/null +++ b/backend/internal/pkg/openai/instructions.txt @@ -0,0 +1,118 @@ +You are Codex, based on GPT-5. You are running as a coding agent in the Codex CLI on a user's computer. + +## General + +- When searching for text or files, prefer using `rg` or `rg --files` respectively because `rg` is much faster than alternatives like `grep`. (If the `rg` command is not found, then use alternatives.) + +## Editing constraints + +- Default to ASCII when editing or creating files. Only introduce non-ASCII or other Unicode characters when there is a clear justification and the file already uses them. +- Add succinct code comments that explain what is going on if code is not self-explanatory. You should not add comments like \"Assigns the value to the variable\", but a brief comment might be useful ahead of a complex code block that the user would otherwise have to spend time parsing out. Usage of these comments should be rare. +- Try to use apply_patch for single file edits, but it is fine to explore other options to make the edit if it does not work well. Do not use apply_patch for changes that are auto-generated (i.e. generating package.json or running a lint or format command like gofmt) or when scripting is more efficient (such as search and replacing a string across a codebase). +- You may be in a dirty git worktree. + * NEVER revert existing changes you did not make unless explicitly requested, since these changes were made by the user. + * If asked to make a commit or code edits and there are unrelated changes to your work or changes that you didn't make in those files, don't revert those changes. + * If the changes are in files you've touched recently, you should read carefully and understand how you can work with the changes rather than reverting them. 
+ * If the changes are in unrelated files, just ignore them and don't revert them. + - Do not amend a commit unless explicitly requested to do so. +- While you are working, you might notice unexpected changes that you didn't make. If this happens, STOP IMMEDIATELY and ask the user how they would like to proceed. +- **NEVER** use destructive commands like `git reset --hard` or `git checkout --` unless specifically requested or approved by the user. + +## Plan tool + +When using the planning tool: +- Skip using the planning tool for straightforward tasks (roughly the easiest 25%). +- Do not make single-step plans. +- When you made a plan, update it after having performed one of the sub-tasks that you shared on the plan. + +## Codex CLI harness, sandboxing, and approvals + +The Codex CLI harness supports several different configurations for sandboxing and escalation approvals that the user can choose from. + +Filesystem sandboxing defines which files can be read or written. The options for `sandbox_mode` are: +- **read-only**: The sandbox only permits reading files. +- **workspace-write**: The sandbox permits reading files, and editing files in `cwd` and `writable_roots`. Editing files in other directories requires approval. +- **danger-full-access**: No filesystem sandboxing - all commands are permitted. + +Network sandboxing defines whether network can be accessed without approval. Options for `network_access` are: +- **restricted**: Requires approval +- **enabled**: No approval needed + +Approvals are your mechanism to get user consent to run shell commands without the sandbox. Possible configuration options for `approval_policy` are +- **untrusted**: The harness will escalate most commands for user approval, apart from a limited allowlist of safe \"read\" commands. +- **on-failure**: The harness will allow all commands to run in the sandbox (if enabled), and failures will be escalated to the user for approval to run again without the sandbox. +- **on-request**: Commands will be run in the sandbox by default, and you can specify in your tool call if you want to escalate a command to run without sandboxing. (Note that this mode is not always available. If it is, you'll see parameters for it in the `shell` command description.) +- **never**: This is a non-interactive mode where you may NEVER ask the user for approval to run commands. Instead, you must always persist and work around constraints to solve the task for the user. You MUST do your utmost best to finish the task and validate your work before yielding. If this mode is paired with `danger-full-access`, take advantage of it to deliver the best outcome for the user. Further, in this mode, your default testing philosophy is overridden: Even if you don't see local patterns for testing, you may add tests and scripts to validate your work. Just remove them before yielding. + +When you are running with `approval_policy == on-request`, and sandboxing enabled, here are scenarios where you'll need to request approval: +- You need to run a command that writes to a directory that requires it (e.g. running tests that write to /var) +- You need to run a GUI app (e.g., open/xdg-open/osascript) to open browsers or files. +- You are running sandboxed and need to run a command that requires network access (e.g. installing packages) +- If you run a command that is important to solving the user's query, but it fails because of sandboxing, rerun the command with approval. 
ALWAYS proceed to use the `sandbox_permissions` and `justification` parameters - do not message the user before requesting approval for the command. +- You are about to take a potentially destructive action such as an `rm` or `git reset` that the user did not explicitly ask for +- (for all of these, you should weigh alternative paths that do not require approval) + +When `sandbox_mode` is set to read-only, you'll need to request approval for any command that isn't a read. + +You will be told what filesystem sandboxing, network sandboxing, and approval mode are active in a developer or user message. If you are not told about this, assume that you are running with workspace-write, network sandboxing enabled, and approval on-failure. + +Although they introduce friction to the user because your work is paused until the user responds, you should leverage them when necessary to accomplish important work. If completing the task requires escalated permissions, do not let these settings or the sandbox deter you from attempting to accomplish the user's task unless it is set to \"never\", in which case never ask for approvals. + +When requesting approval to execute a command that will require escalated privileges: + - Provide the `sandbox_permissions` parameter with the value `\"require_escalated\"` + - Include a short, 1 sentence explanation for why you need escalated permissions in the justification parameter + +## Special user requests + +- If the user makes a simple request (such as asking for the time) which you can fulfill by running a terminal command (such as `date`), you should do so. +- If the user asks for a \"review\", default to a code review mindset: prioritise identifying bugs, risks, behavioural regressions, and missing tests. Findings must be the primary focus of the response - keep summaries or overviews brief and only after enumerating the issues. Present findings first (ordered by severity with file/line references), follow with open questions or assumptions, and offer a change-summary only as a secondary detail. If no findings are discovered, state that explicitly and mention any residual risks or testing gaps. + +## Frontend tasks +When doing frontend design tasks, avoid collapsing into \"AI slop\" or safe, average-looking layouts. +Aim for interfaces that feel intentional, bold, and a bit surprising. +- Typography: Use expressive, purposeful fonts and avoid default stacks (Inter, Roboto, Arial, system). +- Color & Look: Choose a clear visual direction; define CSS variables; avoid purple-on-white defaults. No purple bias or dark mode bias. +- Motion: Use a few meaningful animations (page-load, staggered reveals) instead of generic micro-motions. +- Background: Don't rely on flat, single-color backgrounds; use gradients, shapes, or subtle patterns to build atmosphere. +- Overall: Avoid boilerplate layouts and interchangeable UI patterns. Vary themes, type families, and visual languages across outputs. +- Ensure the page loads properly on both desktop and mobile + +Exception: If working within an existing website or design system, preserve the established patterns, structure, and visual language. + +## Presenting your work and final message + +You are producing plain text that will later be styled by the CLI. Follow these rules exactly. Formatting should make results easy to scan, but not feel mechanical. Use judgment to decide how much structure adds value. + +- Default: be very concise; friendly coding teammate tone.
+- Ask only when needed; suggest ideas; mirror the user's style. +- For substantial work, summarize clearly; follow final‑answer formatting. +- Skip heavy formatting for simple confirmations. +- Don't dump large files you've written; reference paths only. +- No \"save/copy this file\" - User is on the same machine. +- Offer logical next steps (tests, commits, build) briefly; add verify steps if you couldn't do something. +- For code changes: + * Lead with a quick explanation of the change, and then give more details on the context covering where and why a change was made. Do not start this explanation with \"summary\", just jump right in. + * If there are natural next steps the user may want to take, suggest them at the end of your response. Do not make suggestions if there are no natural next steps. + * When suggesting multiple options, use numeric lists for the suggestions so the user can quickly respond with a single number. + - The user does not see command execution outputs. When asked to show the output of a command (e.g. `git show`), relay the important details in your answer or summarize the key lines so the user understands the result. + +### Final answer structure and style guidelines + +- Plain text; CLI handles styling. Use structure only when it helps scanability. +- Headers: optional; short Title Case (1-3 words) wrapped in **…**; no blank line before the first bullet; add only if they truly help. +- Bullets: use - ; merge related points; keep to one line when possible; 4–6 per list ordered by importance; keep phrasing consistent. +- Monospace: backticks for commands/paths/env vars/code ids and inline examples; use for literal keyword bullets; never combine with **. +- Code samples or multi-line snippets should be wrapped in fenced code blocks; include an info string as often as possible. +- Structure: group related bullets; order sections general → specific → supporting; for subsections, start with a bolded keyword bullet, then items; match complexity to the task. +- Tone: collaborative, concise, factual; present tense, active voice; self‑contained; no \"above/below\"; parallel wording. +- Don'ts: no nested bullets/hierarchies; no ANSI codes; don't cram unrelated keywords; keep keyword lists short—wrap/reformat if long; avoid naming formatting styles in answers. +- Adaptation: code explanations → precise, structured with code refs; simple tasks → lead with outcome; big changes → logical walkthrough + rationale + next actions; casual one-offs → plain sentences, no headers/bullets. +- File References: When referencing files in your response follow the below rules: + * Use inline code to make file paths clickable. + * Each reference should have a stand alone path. Even if it's the same file. + * Accepted: absolute, workspace‑relative, a/ or b/ diff prefixes, or bare filename/suffix. + * Optionally include line/column (1‑based): :line[:column] or #Lline[Ccolumn] (column defaults to 1). + * Do not use URIs like file://, vscode://, or https://.
+ * Do not provide range of lines + * Examples: src/app.ts, src/app.ts:42, b/server/index.js#L10, C:\\repo\\project\\main.rs:12:5 + \ No newline at end of file diff --git a/backend/internal/pkg/openai/oauth.go b/backend/internal/pkg/openai/oauth.go new file mode 100644 index 00000000..90d2e001 --- /dev/null +++ b/backend/internal/pkg/openai/oauth.go @@ -0,0 +1,366 @@ +package openai + +import ( + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "encoding/json" + "fmt" + "net/url" + "strings" + "sync" + "time" +) + +// OpenAI OAuth Constants (from CRS project - Codex CLI client) +const ( + // OAuth Client ID for OpenAI (Codex CLI official) + ClientID = "app_EMoamEEZ73f0CkXaXp7hrann" + + // OAuth endpoints + AuthorizeURL = "https://auth.openai.com/oauth/authorize" + TokenURL = "https://auth.openai.com/oauth/token" + + // Default redirect URI (can be customized) + DefaultRedirectURI = "http://localhost:1455/auth/callback" + + // Scopes + DefaultScopes = "openid profile email offline_access" + // RefreshScopes - scope for token refresh (without offline_access, aligned with CRS project) + RefreshScopes = "openid profile email" + + // Session TTL + SessionTTL = 30 * time.Minute +) + +// OAuthSession stores OAuth flow state for OpenAI +type OAuthSession struct { + State string `json:"state"` + CodeVerifier string `json:"code_verifier"` + ProxyURL string `json:"proxy_url,omitempty"` + RedirectURI string `json:"redirect_uri"` + CreatedAt time.Time `json:"created_at"` +} + +// SessionStore manages OAuth sessions in memory +type SessionStore struct { + mu sync.RWMutex + sessions map[string]*OAuthSession + stopCh chan struct{} +} + +// NewSessionStore creates a new session store +func NewSessionStore() *SessionStore { + store := &SessionStore{ + sessions: make(map[string]*OAuthSession), + stopCh: make(chan struct{}), + } + // Start cleanup goroutine + go store.cleanup() + return store +} + +// Set stores a session +func (s *SessionStore) Set(sessionID string, session *OAuthSession) { + s.mu.Lock() + defer s.mu.Unlock() + s.sessions[sessionID] = session +} + +// Get retrieves a session +func (s *SessionStore) Get(sessionID string) (*OAuthSession, bool) { + s.mu.RLock() + defer s.mu.RUnlock() + session, ok := s.sessions[sessionID] + if !ok { + return nil, false + } + // Check if expired + if time.Since(session.CreatedAt) > SessionTTL { + return nil, false + } + return session, true +} + +// Delete removes a session +func (s *SessionStore) Delete(sessionID string) { + s.mu.Lock() + defer s.mu.Unlock() + delete(s.sessions, sessionID) +} + +// Stop stops the cleanup goroutine +func (s *SessionStore) Stop() { + close(s.stopCh) +} + +// cleanup removes expired sessions periodically +func (s *SessionStore) cleanup() { + ticker := time.NewTicker(5 * time.Minute) + defer ticker.Stop() + for { + select { + case <-s.stopCh: + return + case <-ticker.C: + s.mu.Lock() + for id, session := range s.sessions { + if time.Since(session.CreatedAt) > SessionTTL { + delete(s.sessions, id) + } + } + s.mu.Unlock() + } + } +} + +// GenerateRandomBytes generates cryptographically secure random bytes +func GenerateRandomBytes(n int) ([]byte, error) { + b := make([]byte, n) + _, err := rand.Read(b) + if err != nil { + return nil, err + } + return b, nil +} + +// GenerateState generates a random state string for OAuth +func GenerateState() (string, error) { + bytes, err := GenerateRandomBytes(32) + if err != nil { + return "", err + } + return hex.EncodeToString(bytes), nil +} + +// GenerateSessionID 
generates a unique session ID +func GenerateSessionID() (string, error) { + bytes, err := GenerateRandomBytes(16) + if err != nil { + return "", err + } + return hex.EncodeToString(bytes), nil +} + +// GenerateCodeVerifier generates a PKCE code verifier (64 bytes -> hex for OpenAI) +// OpenAI uses hex encoding instead of base64url +func GenerateCodeVerifier() (string, error) { + bytes, err := GenerateRandomBytes(64) + if err != nil { + return "", err + } + return hex.EncodeToString(bytes), nil +} + +// GenerateCodeChallenge generates a PKCE code challenge using S256 method +// Uses base64url encoding as per RFC 7636 +func GenerateCodeChallenge(verifier string) string { + hash := sha256.Sum256([]byte(verifier)) + return base64URLEncode(hash[:]) +} + +// base64URLEncode encodes bytes to base64url without padding +func base64URLEncode(data []byte) string { + encoded := base64.URLEncoding.EncodeToString(data) + // Remove padding + return strings.TrimRight(encoded, "=") +} + +// BuildAuthorizationURL builds the OpenAI OAuth authorization URL +func BuildAuthorizationURL(state, codeChallenge, redirectURI string) string { + if redirectURI == "" { + redirectURI = DefaultRedirectURI + } + + params := url.Values{} + params.Set("response_type", "code") + params.Set("client_id", ClientID) + params.Set("redirect_uri", redirectURI) + params.Set("scope", DefaultScopes) + params.Set("state", state) + params.Set("code_challenge", codeChallenge) + params.Set("code_challenge_method", "S256") + // OpenAI specific parameters + params.Set("id_token_add_organizations", "true") + params.Set("codex_cli_simplified_flow", "true") + + return fmt.Sprintf("%s?%s", AuthorizeURL, params.Encode()) +} + +// TokenRequest represents the token exchange request body +type TokenRequest struct { + GrantType string `json:"grant_type"` + ClientID string `json:"client_id"` + Code string `json:"code"` + RedirectURI string `json:"redirect_uri"` + CodeVerifier string `json:"code_verifier"` +} + +// TokenResponse represents the token response from OpenAI OAuth +type TokenResponse struct { + AccessToken string `json:"access_token"` + IDToken string `json:"id_token"` + TokenType string `json:"token_type"` + ExpiresIn int64 `json:"expires_in"` + RefreshToken string `json:"refresh_token,omitempty"` + Scope string `json:"scope,omitempty"` +} + +// RefreshTokenRequest represents the refresh token request +type RefreshTokenRequest struct { + GrantType string `json:"grant_type"` + RefreshToken string `json:"refresh_token"` + ClientID string `json:"client_id"` + Scope string `json:"scope"` +} + +// IDTokenClaims represents the claims from OpenAI ID Token +type IDTokenClaims struct { + // Standard claims + Sub string `json:"sub"` + Email string `json:"email"` + EmailVerified bool `json:"email_verified"` + Iss string `json:"iss"` + Aud []string `json:"aud"` // OpenAI returns aud as an array + Exp int64 `json:"exp"` + Iat int64 `json:"iat"` + + // OpenAI specific claims (nested under https://api.openai.com/auth) + OpenAIAuth *OpenAIAuthClaims `json:"https://api.openai.com/auth,omitempty"` +} + +// OpenAIAuthClaims represents the OpenAI specific auth claims +type OpenAIAuthClaims struct { + ChatGPTAccountID string `json:"chatgpt_account_id"` + ChatGPTUserID string `json:"chatgpt_user_id"` + UserID string `json:"user_id"` + Organizations []OrganizationClaim `json:"organizations"` +} + +// OrganizationClaim represents an organization in the ID Token +type OrganizationClaim struct { + ID string `json:"id"` + Role string `json:"role"` + Title string 
`json:"title"` + IsDefault bool `json:"is_default"` +} + +// BuildTokenRequest creates a token exchange request for OpenAI +func BuildTokenRequest(code, codeVerifier, redirectURI string) *TokenRequest { + if redirectURI == "" { + redirectURI = DefaultRedirectURI + } + return &TokenRequest{ + GrantType: "authorization_code", + ClientID: ClientID, + Code: code, + RedirectURI: redirectURI, + CodeVerifier: codeVerifier, + } +} + +// BuildRefreshTokenRequest creates a refresh token request for OpenAI +func BuildRefreshTokenRequest(refreshToken string) *RefreshTokenRequest { + return &RefreshTokenRequest{ + GrantType: "refresh_token", + RefreshToken: refreshToken, + ClientID: ClientID, + Scope: RefreshScopes, + } +} + +// ToFormData converts TokenRequest to URL-encoded form data +func (r *TokenRequest) ToFormData() string { + params := url.Values{} + params.Set("grant_type", r.GrantType) + params.Set("client_id", r.ClientID) + params.Set("code", r.Code) + params.Set("redirect_uri", r.RedirectURI) + params.Set("code_verifier", r.CodeVerifier) + return params.Encode() +} + +// ToFormData converts RefreshTokenRequest to URL-encoded form data +func (r *RefreshTokenRequest) ToFormData() string { + params := url.Values{} + params.Set("grant_type", r.GrantType) + params.Set("client_id", r.ClientID) + params.Set("refresh_token", r.RefreshToken) + params.Set("scope", r.Scope) + return params.Encode() +} + +// ParseIDToken parses the ID Token JWT and extracts claims +// Note: This does NOT verify the signature - it only decodes the payload +// For production, you should verify the token signature using OpenAI's public keys +func ParseIDToken(idToken string) (*IDTokenClaims, error) { + parts := strings.Split(idToken, ".") + if len(parts) != 3 { + return nil, fmt.Errorf("invalid JWT format: expected 3 parts, got %d", len(parts)) + } + + // Decode payload (second part) + payload := parts[1] + // Add padding if necessary + switch len(payload) % 4 { + case 2: + payload += "==" + case 3: + payload += "=" + } + + decoded, err := base64.URLEncoding.DecodeString(payload) + if err != nil { + // Try standard encoding + decoded, err = base64.StdEncoding.DecodeString(payload) + if err != nil { + return nil, fmt.Errorf("failed to decode JWT payload: %w", err) + } + } + + var claims IDTokenClaims + if err := json.Unmarshal(decoded, &claims); err != nil { + return nil, fmt.Errorf("failed to parse JWT claims: %w", err) + } + + return &claims, nil +} + +// ExtractUserInfo extracts user information from ID Token claims +type UserInfo struct { + Email string + ChatGPTAccountID string + ChatGPTUserID string + UserID string + OrganizationID string + Organizations []OrganizationClaim +} + +// GetUserInfo extracts user info from ID Token claims +func (c *IDTokenClaims) GetUserInfo() *UserInfo { + info := &UserInfo{ + Email: c.Email, + } + + if c.OpenAIAuth != nil { + info.ChatGPTAccountID = c.OpenAIAuth.ChatGPTAccountID + info.ChatGPTUserID = c.OpenAIAuth.ChatGPTUserID + info.UserID = c.OpenAIAuth.UserID + info.Organizations = c.OpenAIAuth.Organizations + + // Get default organization ID + for _, org := range c.OpenAIAuth.Organizations { + if org.IsDefault { + info.OrganizationID = org.ID + break + } + } + // If no default, use first org + if info.OrganizationID == "" && len(c.OpenAIAuth.Organizations) > 0 { + info.OrganizationID = c.OpenAIAuth.Organizations[0].ID + } + } + + return info +} diff --git a/backend/internal/pkg/openai/request.go b/backend/internal/pkg/openai/request.go new file mode 100644 index 00000000..5b049ddc 
--- /dev/null +++ b/backend/internal/pkg/openai/request.go @@ -0,0 +1,18 @@ +package openai + +// CodexCLIUserAgentPrefixes matches Codex CLI User-Agent patterns +// Examples: "codex_vscode/1.0.0", "codex_cli_rs/0.1.2" +var CodexCLIUserAgentPrefixes = []string{ + "codex_vscode/", + "codex_cli_rs/", +} + +// IsCodexCLIRequest checks if the User-Agent indicates a Codex CLI request +func IsCodexCLIRequest(userAgent string) bool { + for _, prefix := range CodexCLIUserAgentPrefixes { + if len(userAgent) >= len(prefix) && userAgent[:len(prefix)] == prefix { + return true + } + } + return false +} diff --git a/backend/internal/repository/account_repo.go b/backend/internal/repository/account_repo.go index 2a1317c9..639e8046 100644 --- a/backend/internal/repository/account_repo.go +++ b/backend/internal/repository/account_repo.go @@ -222,6 +222,38 @@ func (r *AccountRepository) ListSchedulableByGroupID(ctx context.Context, groupI return accounts, err } +// ListSchedulableByPlatform 按平台获取可调度的账号 +func (r *AccountRepository) ListSchedulableByPlatform(ctx context.Context, platform string) ([]model.Account, error) { + var accounts []model.Account + now := time.Now() + err := r.db.WithContext(ctx). + Where("platform = ?", platform). + Where("status = ? AND schedulable = ?", model.StatusActive, true). + Where("(overload_until IS NULL OR overload_until <= ?)", now). + Where("(rate_limit_reset_at IS NULL OR rate_limit_reset_at <= ?)", now). + Preload("Proxy"). + Order("priority ASC"). + Find(&accounts).Error + return accounts, err +} + +// ListSchedulableByGroupIDAndPlatform 按组和平台获取可调度的账号 +func (r *AccountRepository) ListSchedulableByGroupIDAndPlatform(ctx context.Context, groupID int64, platform string) ([]model.Account, error) { + var accounts []model.Account + now := time.Now() + err := r.db.WithContext(ctx). + Joins("JOIN account_groups ON account_groups.account_id = accounts.id"). + Where("account_groups.group_id = ?", groupID). + Where("accounts.platform = ?", platform). + Where("accounts.status = ? AND accounts.schedulable = ?", model.StatusActive, true). + Where("(accounts.overload_until IS NULL OR accounts.overload_until <= ?)", now). + Where("(accounts.rate_limit_reset_at IS NULL OR accounts.rate_limit_reset_at <= ?)", now). + Preload("Proxy"). + Order("account_groups.priority ASC, accounts.priority ASC"). + Find(&accounts).Error + return accounts, err +} + // SetRateLimited 标记账号为限流状态(429) func (r *AccountRepository) SetRateLimited(ctx context.Context, id int64, resetAt time.Time) error { now := time.Now() diff --git a/backend/internal/repository/claude_service.go b/backend/internal/repository/http_upstream.go similarity index 68% rename from backend/internal/repository/claude_service.go rename to backend/internal/repository/http_upstream.go index dad8730e..d5c0356e 100644 --- a/backend/internal/repository/claude_service.go +++ b/backend/internal/repository/http_upstream.go @@ -6,15 +6,18 @@ import ( "time" "sub2api/internal/config" - "sub2api/internal/service" + "sub2api/internal/service/ports" ) -type claudeUpstreamService struct { +// httpUpstreamService is a generic HTTP upstream service that can be used for +// making requests to any HTTP API (Claude, OpenAI, etc.) with optional proxy support. 
+type httpUpstreamService struct { defaultClient *http.Client cfg *config.Config } -func NewClaudeUpstream(cfg *config.Config) service.ClaudeUpstream { +// NewHTTPUpstream creates a new generic HTTP upstream service +func NewHTTPUpstream(cfg *config.Config) ports.HTTPUpstream { responseHeaderTimeout := time.Duration(cfg.Gateway.ResponseHeaderTimeout) * time.Second if responseHeaderTimeout == 0 { responseHeaderTimeout = 300 * time.Second @@ -27,13 +30,13 @@ func NewClaudeUpstream(cfg *config.Config) service.ClaudeUpstream { ResponseHeaderTimeout: responseHeaderTimeout, } - return &claudeUpstreamService{ + return &httpUpstreamService{ defaultClient: &http.Client{Transport: transport}, cfg: cfg, } } -func (s *claudeUpstreamService) Do(req *http.Request, proxyURL string) (*http.Response, error) { +func (s *httpUpstreamService) Do(req *http.Request, proxyURL string) (*http.Response, error) { if proxyURL == "" { return s.defaultClient.Do(req) } @@ -41,7 +44,7 @@ func (s *claudeUpstreamService) Do(req *http.Request, proxyURL string) (*http.Re return client.Do(req) } -func (s *claudeUpstreamService) createProxyClient(proxyURL string) *http.Client { +func (s *httpUpstreamService) createProxyClient(proxyURL string) *http.Client { parsedURL, err := url.Parse(proxyURL) if err != nil { return s.defaultClient diff --git a/backend/internal/repository/openai_oauth_service.go b/backend/internal/repository/openai_oauth_service.go new file mode 100644 index 00000000..b7ca7b8e --- /dev/null +++ b/backend/internal/repository/openai_oauth_service.go @@ -0,0 +1,92 @@ +package repository + +import ( + "context" + "fmt" + "net/url" + "time" + + "sub2api/internal/pkg/openai" + "sub2api/internal/service/ports" + + "github.com/imroc/req/v3" +) + +type openaiOAuthService struct{} + +// NewOpenAIOAuthClient creates a new OpenAI OAuth client +func NewOpenAIOAuthClient() ports.OpenAIOAuthClient { + return &openaiOAuthService{} +} + +func (s *openaiOAuthService) ExchangeCode(ctx context.Context, code, codeVerifier, redirectURI, proxyURL string) (*openai.TokenResponse, error) { + client := createOpenAIReqClient(proxyURL) + + if redirectURI == "" { + redirectURI = openai.DefaultRedirectURI + } + + formData := url.Values{} + formData.Set("grant_type", "authorization_code") + formData.Set("client_id", openai.ClientID) + formData.Set("code", code) + formData.Set("redirect_uri", redirectURI) + formData.Set("code_verifier", codeVerifier) + + var tokenResp openai.TokenResponse + + resp, err := client.R(). + SetContext(ctx). + SetFormDataFromValues(formData). + SetSuccessResult(&tokenResp). + Post(openai.TokenURL) + + if err != nil { + return nil, fmt.Errorf("request failed: %w", err) + } + + if !resp.IsSuccessState() { + return nil, fmt.Errorf("token exchange failed: status %d, body: %s", resp.StatusCode, resp.String()) + } + + return &tokenResp, nil +} + +func (s *openaiOAuthService) RefreshToken(ctx context.Context, refreshToken, proxyURL string) (*openai.TokenResponse, error) { + client := createOpenAIReqClient(proxyURL) + + formData := url.Values{} + formData.Set("grant_type", "refresh_token") + formData.Set("refresh_token", refreshToken) + formData.Set("client_id", openai.ClientID) + formData.Set("scope", openai.RefreshScopes) + + var tokenResp openai.TokenResponse + + resp, err := client.R(). + SetContext(ctx). + SetFormDataFromValues(formData). + SetSuccessResult(&tokenResp). 
+ Post(openai.TokenURL) + + if err != nil { + return nil, fmt.Errorf("request failed: %w", err) + } + + if !resp.IsSuccessState() { + return nil, fmt.Errorf("token refresh failed: status %d, body: %s", resp.StatusCode, resp.String()) + } + + return &tokenResp, nil +} + +func createOpenAIReqClient(proxyURL string) *req.Client { + client := req.C(). + SetTimeout(60 * time.Second) + + if proxyURL != "" { + client.SetProxyURL(proxyURL) + } + + return client +} diff --git a/backend/internal/repository/wire.go b/backend/internal/repository/wire.go index ac8838cc..a0c20461 100644 --- a/backend/internal/repository/wire.go +++ b/backend/internal/repository/wire.go @@ -36,7 +36,8 @@ var ProviderSet = wire.NewSet( NewProxyExitInfoProber, NewClaudeUsageFetcher, NewClaudeOAuthClient, - NewClaudeUpstream, + NewHTTPUpstream, + NewOpenAIOAuthClient, // Bind concrete repositories to service port interfaces wire.Bind(new(ports.UserRepository), new(*UserRepository)), diff --git a/backend/internal/server/router.go b/backend/internal/server/router.go index 360595f0..d44a021b 100644 --- a/backend/internal/server/router.go +++ b/backend/internal/server/router.go @@ -192,7 +192,7 @@ func registerRoutes(r *gin.Engine, h *handler.Handlers, s *service.Services, rep accounts.GET("/:id/models", h.Admin.Account.GetAvailableModels) accounts.POST("/batch", h.Admin.Account.BatchCreate) - // OAuth routes + // Claude OAuth routes accounts.POST("/generate-auth-url", h.Admin.OAuth.GenerateAuthURL) accounts.POST("/generate-setup-token-url", h.Admin.OAuth.GenerateSetupTokenURL) accounts.POST("/exchange-code", h.Admin.OAuth.ExchangeCode) @@ -201,6 +201,16 @@ func registerRoutes(r *gin.Engine, h *handler.Handlers, s *service.Services, rep accounts.POST("/setup-token-cookie-auth", h.Admin.OAuth.SetupTokenCookieAuth) } + // OpenAI OAuth routes + openai := admin.Group("/openai") + { + openai.POST("/generate-auth-url", h.Admin.OpenAIOAuth.GenerateAuthURL) + openai.POST("/exchange-code", h.Admin.OpenAIOAuth.ExchangeCode) + openai.POST("/refresh-token", h.Admin.OpenAIOAuth.RefreshToken) + openai.POST("/accounts/:id/refresh", h.Admin.OpenAIOAuth.RefreshAccountToken) + openai.POST("/create-from-oauth", h.Admin.OpenAIOAuth.CreateAccountFromOAuth) + } + // 代理管理 proxies := admin.Group("/proxies") { @@ -289,5 +299,10 @@ func registerRoutes(r *gin.Engine, h *handler.Handlers, s *service.Services, rep gateway.POST("/messages/count_tokens", h.Gateway.CountTokens) gateway.GET("/models", h.Gateway.Models) gateway.GET("/usage", h.Gateway.Usage) + // OpenAI Responses API + gateway.POST("/responses", h.OpenAIGateway.Responses) } + + // OpenAI Responses API(不带v1前缀的别名) + r.POST("/responses", middleware.ApiKeyAuthWithSubscription(s.ApiKey, s.Subscription), h.OpenAIGateway.Responses) } diff --git a/backend/internal/service/account_test_service.go b/backend/internal/service/account_test_service.go index e9bc5ec5..9d995685 100644 --- a/backend/internal/service/account_test_service.go +++ b/backend/internal/service/account_test_service.go @@ -14,7 +14,9 @@ import ( "strings" "time" + "sub2api/internal/model" "sub2api/internal/pkg/claude" + "sub2api/internal/pkg/openai" "sub2api/internal/service/ports" "github.com/gin-gonic/gin" @@ -22,7 +24,9 @@ import ( ) const ( - testClaudeAPIURL = "https://api.anthropic.com/v1/messages" + testClaudeAPIURL = "https://api.anthropic.com/v1/messages" + testOpenAIAPIURL = "https://api.openai.com/v1/responses" + chatgptCodexAPIURL = "https://chatgpt.com/backend-api/codex/responses" ) // TestEvent represents a SSE event for 
account testing @@ -36,17 +40,19 @@ type TestEvent struct { // AccountTestService handles account testing operations type AccountTestService struct { - accountRepo ports.AccountRepository - oauthService *OAuthService - claudeUpstream ClaudeUpstream + accountRepo ports.AccountRepository + oauthService *OAuthService + openaiOAuthService *OpenAIOAuthService + httpUpstream ports.HTTPUpstream } // NewAccountTestService creates a new AccountTestService -func NewAccountTestService(accountRepo ports.AccountRepository, oauthService *OAuthService, claudeUpstream ClaudeUpstream) *AccountTestService { +func NewAccountTestService(accountRepo ports.AccountRepository, oauthService *OAuthService, openaiOAuthService *OpenAIOAuthService, httpUpstream ports.HTTPUpstream) *AccountTestService { return &AccountTestService{ - accountRepo: accountRepo, - oauthService: oauthService, - claudeUpstream: claudeUpstream, + accountRepo: accountRepo, + oauthService: oauthService, + openaiOAuthService: openaiOAuthService, + httpUpstream: httpUpstream, } } @@ -114,6 +120,18 @@ func (s *AccountTestService) TestAccountConnection(c *gin.Context, accountID int return s.sendErrorAndEnd(c, "Account not found") } + // Route to platform-specific test method + if account.IsOpenAI() { + return s.testOpenAIAccountConnection(c, account, modelID) + } + + return s.testClaudeAccountConnection(c, account, modelID) +} + +// testClaudeAccountConnection tests an Anthropic Claude account's connection +func (s *AccountTestService) testClaudeAccountConnection(c *gin.Context, account *model.Account, modelID string) error { + ctx := c.Request.Context() + // Determine the model to use testModelID := modelID if testModelID == "" { @@ -222,7 +240,7 @@ func (s *AccountTestService) TestAccountConnection(c *gin.Context, accountID int proxyURL = account.Proxy.URL() } - resp, err := s.claudeUpstream.Do(req, proxyURL) + resp, err := s.httpUpstream.Do(req, proxyURL) if err != nil { return s.sendErrorAndEnd(c, fmt.Sprintf("Request failed: %s", err.Error())) } @@ -234,11 +252,153 @@ func (s *AccountTestService) TestAccountConnection(c *gin.Context, accountID int } // Process SSE stream - return s.processStream(c, resp.Body) + return s.processClaudeStream(c, resp.Body) } -// processStream processes the SSE stream from Claude API -func (s *AccountTestService) processStream(c *gin.Context, body io.Reader) error { +// testOpenAIAccountConnection tests an OpenAI account's connection +func (s *AccountTestService) testOpenAIAccountConnection(c *gin.Context, account *model.Account, modelID string) error { + ctx := c.Request.Context() + + // Default to openai.DefaultTestModel for OpenAI testing + testModelID := modelID + if testModelID == "" { + testModelID = openai.DefaultTestModel + } + + // For API Key accounts with model mapping, map the model + if account.Type == "apikey" { + mapping := account.GetModelMapping() + if len(mapping) > 0 { + if mappedModel, exists := mapping[testModelID]; exists { + testModelID = mappedModel + } + } + } + + // Determine authentication method and API URL + var authToken string + var apiURL string + var isOAuth bool + var chatgptAccountID string + + if account.IsOAuth() { + isOAuth = true + // OAuth - use Bearer token with ChatGPT internal API + authToken = account.GetOpenAIAccessToken() + if authToken == "" { + return s.sendErrorAndEnd(c, "No access token available") + } + + // Check if token is expired and refresh if needed + if account.IsOpenAITokenExpired() && s.openaiOAuthService != nil { + tokenInfo, err := 
s.openaiOAuthService.RefreshAccountToken(ctx, account) + if err != nil { + return s.sendErrorAndEnd(c, fmt.Sprintf("Failed to refresh token: %s", err.Error())) + } + authToken = tokenInfo.AccessToken + } + + // OAuth uses ChatGPT internal API + apiURL = chatgptCodexAPIURL + chatgptAccountID = account.GetChatGPTAccountID() + } else if account.Type == "apikey" { + // API Key - use Platform API + authToken = account.GetOpenAIApiKey() + if authToken == "" { + return s.sendErrorAndEnd(c, "No API key available") + } + + baseURL := account.GetOpenAIBaseURL() + if baseURL == "" { + baseURL = "https://api.openai.com" + } + apiURL = strings.TrimSuffix(baseURL, "/") + "/v1/responses" + } else { + return s.sendErrorAndEnd(c, fmt.Sprintf("Unsupported account type: %s", account.Type)) + } + + // Set SSE headers + c.Writer.Header().Set("Content-Type", "text/event-stream") + c.Writer.Header().Set("Cache-Control", "no-cache") + c.Writer.Header().Set("Connection", "keep-alive") + c.Writer.Header().Set("X-Accel-Buffering", "no") + c.Writer.Flush() + + // Create OpenAI Responses API payload + payload := createOpenAITestPayload(testModelID, isOAuth) + payloadBytes, _ := json.Marshal(payload) + + // Send test_start event + s.sendEvent(c, TestEvent{Type: "test_start", Model: testModelID}) + + req, err := http.NewRequestWithContext(ctx, "POST", apiURL, bytes.NewReader(payloadBytes)) + if err != nil { + return s.sendErrorAndEnd(c, "Failed to create request") + } + + // Set common headers + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", "Bearer "+authToken) + + // Set OAuth-specific headers for ChatGPT internal API + if isOAuth { + req.Host = "chatgpt.com" + req.Header.Set("accept", "text/event-stream") + if chatgptAccountID != "" { + req.Header.Set("chatgpt-account-id", chatgptAccountID) + } + } + + // Get proxy URL + proxyURL := "" + if account.ProxyID != nil && account.Proxy != nil { + proxyURL = account.Proxy.URL() + } + + resp, err := s.httpUpstream.Do(req, proxyURL) + if err != nil { + return s.sendErrorAndEnd(c, fmt.Sprintf("Request failed: %s", err.Error())) + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + return s.sendErrorAndEnd(c, fmt.Sprintf("API returned %d: %s", resp.StatusCode, string(body))) + } + + // Process SSE stream + return s.processOpenAIStream(c, resp.Body) +} + +// createOpenAITestPayload creates a test payload for OpenAI Responses API +func createOpenAITestPayload(modelID string, isOAuth bool) map[string]any { + payload := map[string]any{ + "model": modelID, + "input": []map[string]any{ + { + "role": "user", + "content": []map[string]any{ + { + "type": "input_text", + "text": "hi", + }, + }, + }, + }, + "stream": true, + } + + // OAuth accounts using ChatGPT internal API require store: false and instructions + if isOAuth { + payload["store"] = false + payload["instructions"] = openai.DefaultInstructions + } + + return payload +} + +// processClaudeStream processes the SSE stream from Claude API +func (s *AccountTestService) processClaudeStream(c *gin.Context, body io.Reader) error { reader := bufio.NewReader(body) for { @@ -291,6 +451,59 @@ func (s *AccountTestService) processStream(c *gin.Context, body io.Reader) error } } +// processOpenAIStream processes the SSE stream from OpenAI Responses API +func (s *AccountTestService) processOpenAIStream(c *gin.Context, body io.Reader) error { + reader := bufio.NewReader(body) + + for { + line, err := reader.ReadString('\n') + if err 
!= nil { + if err == io.EOF { + s.sendEvent(c, TestEvent{Type: "test_complete", Success: true}) + return nil + } + return s.sendErrorAndEnd(c, fmt.Sprintf("Stream read error: %s", err.Error())) + } + + line = strings.TrimSpace(line) + if line == "" || !strings.HasPrefix(line, "data: ") { + continue + } + + jsonStr := strings.TrimPrefix(line, "data: ") + if jsonStr == "[DONE]" { + s.sendEvent(c, TestEvent{Type: "test_complete", Success: true}) + return nil + } + + var data map[string]any + if err := json.Unmarshal([]byte(jsonStr), &data); err != nil { + continue + } + + eventType, _ := data["type"].(string) + + switch eventType { + case "response.output_text.delta": + // OpenAI Responses API uses "delta" field for text content + if delta, ok := data["delta"].(string); ok && delta != "" { + s.sendEvent(c, TestEvent{Type: "content", Text: delta}) + } + case "response.completed": + s.sendEvent(c, TestEvent{Type: "test_complete", Success: true}) + return nil + case "error": + errorMsg := "Unknown error" + if errData, ok := data["error"].(map[string]any); ok { + if msg, ok := errData["message"].(string); ok { + errorMsg = msg + } + } + return s.sendErrorAndEnd(c, errorMsg) + } + } +} + // sendEvent sends a SSE event to the client func (s *AccountTestService) sendEvent(c *gin.Context, event TestEvent) { eventJSON, _ := json.Marshal(event) diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index c30041bd..0aa3f56a 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -24,11 +24,6 @@ import ( "github.com/gin-gonic/gin" ) -// ClaudeUpstream handles HTTP requests to Claude API -type ClaudeUpstream interface { - Do(req *http.Request, proxyURL string) (*http.Response, error) -} - const ( claudeAPIURL = "https://api.anthropic.com/v1/messages?beta=true" claudeAPICountTokensURL = "https://api.anthropic.com/v1/messages/count_tokens?beta=true" @@ -87,7 +82,7 @@ type GatewayService struct { rateLimitService *RateLimitService billingCacheService *BillingCacheService identityService *IdentityService - claudeUpstream ClaudeUpstream + httpUpstream ports.HTTPUpstream } // NewGatewayService creates a new GatewayService @@ -102,7 +97,7 @@ func NewGatewayService( rateLimitService *RateLimitService, billingCacheService *BillingCacheService, identityService *IdentityService, - claudeUpstream ClaudeUpstream, + httpUpstream ports.HTTPUpstream, ) *GatewayService { return &GatewayService{ accountRepo: accountRepo, @@ -115,7 +110,7 @@ func NewGatewayService( rateLimitService: rateLimitService, billingCacheService: billingCacheService, identityService: identityService, - claudeUpstream: claudeUpstream, + httpUpstream: httpUpstream, } } @@ -285,13 +280,13 @@ func (s *GatewayService) SelectAccountForModel(ctx context.Context, groupID *int } } - // 2. 获取可调度账号列表(排除限流和过载的账号) + // 2. 
获取可调度账号列表(排除限流和过载的账号,仅限 Anthropic 平台) var accounts []model.Account var err error if groupID != nil { - accounts, err = s.accountRepo.ListSchedulableByGroupID(ctx, *groupID) + accounts, err = s.accountRepo.ListSchedulableByGroupIDAndPlatform(ctx, *groupID, model.PlatformAnthropic) } else { - accounts, err = s.accountRepo.ListSchedulable(ctx) + accounts, err = s.accountRepo.ListSchedulableByPlatform(ctx, model.PlatformAnthropic) } if err != nil { return nil, fmt.Errorf("query accounts failed: %w", err) @@ -407,7 +402,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *m } // 发送请求 - resp, err := s.claudeUpstream.Do(upstreamReq, proxyURL) + resp, err := s.httpUpstream.Do(upstreamReq, proxyURL) if err != nil { return nil, fmt.Errorf("upstream request failed: %w", err) } @@ -481,7 +476,7 @@ func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Contex // 设置认证头 if tokenType == "oauth" { - req.Header.Set("Authorization", "Bearer "+token) + req.Header.Set("authorization", "Bearer "+token) } else { req.Header.Set("x-api-key", token) } @@ -502,8 +497,8 @@ func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Contex } // 确保必要的headers存在 - if req.Header.Get("Content-Type") == "" { - req.Header.Set("Content-Type", "application/json") + if req.Header.Get("content-type") == "" { + req.Header.Set("content-type", "application/json") } if req.Header.Get("anthropic-version") == "" { req.Header.Set("anthropic-version", "2023-06-01") @@ -982,7 +977,7 @@ func (s *GatewayService) ForwardCountTokens(ctx context.Context, c *gin.Context, } // 发送请求 - resp, err := s.claudeUpstream.Do(upstreamReq, proxyURL) + resp, err := s.httpUpstream.Do(upstreamReq, proxyURL) if err != nil { s.countTokensError(c, http.StatusBadGateway, "upstream_error", "Request failed") return fmt.Errorf("upstream request failed: %w", err) @@ -1049,7 +1044,7 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con // 设置认证头 if tokenType == "oauth" { - req.Header.Set("Authorization", "Bearer "+token) + req.Header.Set("authorization", "Bearer "+token) } else { req.Header.Set("x-api-key", token) } @@ -1073,8 +1068,8 @@ func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Con } // 确保必要的 headers 存在 - if req.Header.Get("Content-Type") == "" { - req.Header.Set("Content-Type", "application/json") + if req.Header.Get("content-type") == "" { + req.Header.Set("content-type", "application/json") } if req.Header.Get("anthropic-version") == "" { req.Header.Set("anthropic-version", "2023-06-01") diff --git a/backend/internal/service/identity_service.go b/backend/internal/service/identity_service.go index 25350f03..462585c1 100644 --- a/backend/internal/service/identity_service.go +++ b/backend/internal/service/identity_service.go @@ -114,12 +114,12 @@ func (s *IdentityService) ApplyFingerprint(req *http.Request, fp *ports.Fingerpr return } - // 设置User-Agent + // 设置user-agent if fp.UserAgent != "" { - req.Header.Set("User-Agent", fp.UserAgent) + req.Header.Set("user-agent", fp.UserAgent) } - // 设置x-stainless-*头(使用正确的大小写) + // 设置x-stainless-*头 if fp.StainlessLang != "" { req.Header.Set("X-Stainless-Lang", fp.StainlessLang) } diff --git a/backend/internal/service/oauth_service.go b/backend/internal/service/oauth_service.go index 251bf446..71c2e809 100644 --- a/backend/internal/service/oauth_service.go +++ b/backend/internal/service/oauth_service.go @@ -284,3 +284,8 @@ func (s *OAuthService) RefreshAccountToken(ctx context.Context, account *model.A 
return s.RefreshToken(ctx, refreshToken, proxyURL) } + +// Stop stops the session store cleanup goroutine +func (s *OAuthService) Stop() { + s.sessionStore.Stop() +} diff --git a/backend/internal/service/openai_gateway_service.go b/backend/internal/service/openai_gateway_service.go new file mode 100644 index 00000000..b38d9203 --- /dev/null +++ b/backend/internal/service/openai_gateway_service.go @@ -0,0 +1,700 @@ +package service + +import ( + "bufio" + "bytes" + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strings" + "time" + + "sub2api/internal/config" + "sub2api/internal/model" + "sub2api/internal/service/ports" + + "github.com/gin-gonic/gin" +) + +const ( + // ChatGPT internal API for OAuth accounts + chatgptCodexURL = "https://chatgpt.com/backend-api/codex/responses" + // OpenAI Platform API for API Key accounts (fallback) + openaiPlatformAPIURL = "https://api.openai.com/v1/responses" + openaiStickySessionTTL = time.Hour // 粘性会话TTL +) + +// OpenAI allowed headers whitelist (for non-OAuth accounts) +var openaiAllowedHeaders = map[string]bool{ + "accept-language": true, + "content-type": true, + "user-agent": true, + "originator": true, + "session_id": true, +} + +// OpenAIUsage represents OpenAI API response usage +type OpenAIUsage struct { + InputTokens int `json:"input_tokens"` + OutputTokens int `json:"output_tokens"` + CacheCreationInputTokens int `json:"cache_creation_input_tokens,omitempty"` + CacheReadInputTokens int `json:"cache_read_input_tokens,omitempty"` +} + +// OpenAIForwardResult represents the result of forwarding +type OpenAIForwardResult struct { + RequestID string + Usage OpenAIUsage + Model string + Stream bool + Duration time.Duration + FirstTokenMs *int +} + +// OpenAIGatewayService handles OpenAI API gateway operations +type OpenAIGatewayService struct { + accountRepo ports.AccountRepository + usageLogRepo ports.UsageLogRepository + userRepo ports.UserRepository + userSubRepo ports.UserSubscriptionRepository + cache ports.GatewayCache + cfg *config.Config + billingService *BillingService + rateLimitService *RateLimitService + billingCacheService *BillingCacheService + httpUpstream ports.HTTPUpstream +} + +// NewOpenAIGatewayService creates a new OpenAIGatewayService +func NewOpenAIGatewayService( + accountRepo ports.AccountRepository, + usageLogRepo ports.UsageLogRepository, + userRepo ports.UserRepository, + userSubRepo ports.UserSubscriptionRepository, + cache ports.GatewayCache, + cfg *config.Config, + billingService *BillingService, + rateLimitService *RateLimitService, + billingCacheService *BillingCacheService, + httpUpstream ports.HTTPUpstream, +) *OpenAIGatewayService { + return &OpenAIGatewayService{ + accountRepo: accountRepo, + usageLogRepo: usageLogRepo, + userRepo: userRepo, + userSubRepo: userSubRepo, + cache: cache, + cfg: cfg, + billingService: billingService, + rateLimitService: rateLimitService, + billingCacheService: billingCacheService, + httpUpstream: httpUpstream, + } +} + +// GenerateSessionHash generates session hash from header (OpenAI uses session_id header) +func (s *OpenAIGatewayService) GenerateSessionHash(c *gin.Context) string { + sessionID := c.GetHeader("session_id") + if sessionID == "" { + return "" + } + hash := sha256.Sum256([]byte(sessionID)) + return hex.EncodeToString(hash[:]) +} + +// SelectAccount selects an OpenAI account with sticky session support +func (s *OpenAIGatewayService) SelectAccount(ctx context.Context, groupID *int64, sessionHash string) 
(*model.Account, error) { + return s.SelectAccountForModel(ctx, groupID, sessionHash, "") +} + +// SelectAccountForModel selects an account supporting the requested model +func (s *OpenAIGatewayService) SelectAccountForModel(ctx context.Context, groupID *int64, sessionHash string, requestedModel string) (*model.Account, error) { + // 1. Check sticky session + if sessionHash != "" { + accountID, err := s.cache.GetSessionAccountID(ctx, "openai:"+sessionHash) + if err == nil && accountID > 0 { + account, err := s.accountRepo.GetByID(ctx, accountID) + if err == nil && account.IsSchedulable() && account.IsOpenAI() && (requestedModel == "" || account.IsModelSupported(requestedModel)) { + // Refresh sticky session TTL + _ = s.cache.RefreshSessionTTL(ctx, "openai:"+sessionHash, openaiStickySessionTTL) + return account, nil + } + } + } + + // 2. Get schedulable OpenAI accounts + var accounts []model.Account + var err error + if groupID != nil { + accounts, err = s.accountRepo.ListSchedulableByGroupIDAndPlatform(ctx, *groupID, model.PlatformOpenAI) + } else { + accounts, err = s.accountRepo.ListSchedulableByPlatform(ctx, model.PlatformOpenAI) + } + if err != nil { + return nil, fmt.Errorf("query accounts failed: %w", err) + } + + // 3. Select by priority + LRU + var selected *model.Account + for i := range accounts { + acc := &accounts[i] + // Check model support + if requestedModel != "" && !acc.IsModelSupported(requestedModel) { + continue + } + if selected == nil { + selected = acc + continue + } + // Lower priority value means higher priority + if acc.Priority < selected.Priority { + selected = acc + } else if acc.Priority == selected.Priority { + // Same priority, select least recently used + if acc.LastUsedAt == nil || (selected.LastUsedAt != nil && acc.LastUsedAt.Before(*selected.LastUsedAt)) { + selected = acc + } + } + } + + if selected == nil { + if requestedModel != "" { + return nil, fmt.Errorf("no available OpenAI accounts supporting model: %s", requestedModel) + } + return nil, errors.New("no available OpenAI accounts") + } + + // 4. 
Set sticky session + if sessionHash != "" { + _ = s.cache.SetSessionAccountID(ctx, "openai:"+sessionHash, selected.ID, openaiStickySessionTTL) + } + + return selected, nil +} + +// GetAccessToken gets the access token for an OpenAI account +func (s *OpenAIGatewayService) GetAccessToken(ctx context.Context, account *model.Account) (string, string, error) { + if account.Type == model.AccountTypeOAuth { + accessToken := account.GetOpenAIAccessToken() + if accessToken == "" { + return "", "", errors.New("access_token not found in credentials") + } + return accessToken, "oauth", nil + } else if account.Type == model.AccountTypeApiKey { + apiKey := account.GetOpenAIApiKey() + if apiKey == "" { + return "", "", errors.New("api_key not found in credentials") + } + return apiKey, "apikey", nil + } + return "", "", fmt.Errorf("unsupported account type: %s", account.Type) +} + +// Forward forwards request to OpenAI API +func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, account *model.Account, body []byte) (*OpenAIForwardResult, error) { + startTime := time.Now() + + // Parse request body once (avoid multiple parse/serialize cycles) + var reqBody map[string]any + if err := json.Unmarshal(body, &reqBody); err != nil { + return nil, fmt.Errorf("parse request: %w", err) + } + + // Extract model and stream from parsed body + reqModel, _ := reqBody["model"].(string) + reqStream, _ := reqBody["stream"].(bool) + + // Track if body needs re-serialization + bodyModified := false + originalModel := reqModel + + // Apply model mapping + mappedModel := account.GetMappedModel(reqModel) + if mappedModel != reqModel { + reqBody["model"] = mappedModel + bodyModified = true + } + + // For OAuth accounts using ChatGPT internal API, add store: false + if account.Type == model.AccountTypeOAuth { + reqBody["store"] = false + bodyModified = true + } + + // Re-serialize body only if modified + if bodyModified { + var err error + body, err = json.Marshal(reqBody) + if err != nil { + return nil, fmt.Errorf("serialize request body: %w", err) + } + } + + // Get access token + token, _, err := s.GetAccessToken(ctx, account) + if err != nil { + return nil, err + } + + // Build upstream request + upstreamReq, err := s.buildUpstreamRequest(ctx, c, account, body, token, reqStream) + if err != nil { + return nil, err + } + + // Get proxy URL + proxyURL := "" + if account.ProxyID != nil && account.Proxy != nil { + proxyURL = account.Proxy.URL() + } + + // Send request + resp, err := s.httpUpstream.Do(upstreamReq, proxyURL) + if err != nil { + return nil, fmt.Errorf("upstream request failed: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + // Handle error response + if resp.StatusCode >= 400 { + return s.handleErrorResponse(ctx, resp, c, account) + } + + // Handle normal response + var usage *OpenAIUsage + var firstTokenMs *int + if reqStream { + streamResult, err := s.handleStreamingResponse(ctx, resp, c, account, startTime, originalModel, mappedModel) + if err != nil { + return nil, err + } + usage = streamResult.usage + firstTokenMs = streamResult.firstTokenMs + } else { + usage, err = s.handleNonStreamingResponse(ctx, resp, c, account, originalModel, mappedModel) + if err != nil { + return nil, err + } + } + + return &OpenAIForwardResult{ + RequestID: resp.Header.Get("x-request-id"), + Usage: *usage, + Model: originalModel, + Stream: reqStream, + Duration: time.Since(startTime), + FirstTokenMs: firstTokenMs, + }, nil +} + +func (s *OpenAIGatewayService) buildUpstreamRequest(ctx context.Context, 
c *gin.Context, account *model.Account, body []byte, token string, isStream bool) (*http.Request, error) {
+    // Determine target URL based on account type
+    var targetURL string
+    if account.Type == model.AccountTypeOAuth {
+        // OAuth accounts use ChatGPT internal API
+        targetURL = chatgptCodexURL
+    } else if account.Type == model.AccountTypeApiKey {
+        // API Key accounts use Platform API or custom base URL
+        baseURL := account.GetOpenAIBaseURL()
+        if baseURL != "" {
+            targetURL = baseURL + "/v1/responses"
+        } else {
+            targetURL = openaiPlatformAPIURL
+        }
+    } else {
+        targetURL = openaiPlatformAPIURL
+    }
+
+    req, err := http.NewRequestWithContext(ctx, "POST", targetURL, bytes.NewReader(body))
+    if err != nil {
+        return nil, err
+    }
+
+    // Set authentication header
+    req.Header.Set("authorization", "Bearer "+token)
+
+    // Set headers specific to OAuth accounts (ChatGPT internal API)
+    if account.Type == model.AccountTypeOAuth {
+        // Required: set Host for ChatGPT API (must use req.Host, not Header.Set)
+        req.Host = "chatgpt.com"
+        // Required: set chatgpt-account-id header
+        chatgptAccountID := account.GetChatGPTAccountID()
+        if chatgptAccountID != "" {
+            req.Header.Set("chatgpt-account-id", chatgptAccountID)
+        }
+        // Set accept header based on stream mode
+        if isStream {
+            req.Header.Set("accept", "text/event-stream")
+        } else {
+            req.Header.Set("accept", "application/json")
+        }
+    }
+
+    // Whitelist passthrough headers
+    for key, values := range c.Request.Header {
+        lowerKey := strings.ToLower(key)
+        if openaiAllowedHeaders[lowerKey] {
+            for _, v := range values {
+                req.Header.Add(key, v)
+            }
+        }
+    }
+
+    // Apply custom User-Agent if configured
+    customUA := account.GetOpenAIUserAgent()
+    if customUA != "" {
+        req.Header.Set("user-agent", customUA)
+    }
+
+    // Ensure required headers exist
+    if req.Header.Get("content-type") == "" {
+        req.Header.Set("content-type", "application/json")
+    }
+
+    return req, nil
+}
+
+func (s *OpenAIGatewayService) handleErrorResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *model.Account) (*OpenAIForwardResult, error) {
+    body, _ := io.ReadAll(resp.Body)
+
+    // Check custom error codes
+    if !account.ShouldHandleErrorCode(resp.StatusCode) {
+        c.JSON(http.StatusInternalServerError, gin.H{
+            "error": gin.H{
+                "type":    "upstream_error",
+                "message": "Upstream gateway error",
+            },
+        })
+        return nil, fmt.Errorf("upstream error: %d (not in custom error codes)", resp.StatusCode)
+    }
+
+    // Handle upstream error (mark account status)
+    s.rateLimitService.HandleUpstreamError(ctx, account, resp.StatusCode, resp.Header, body)
+
+    // Return appropriate error response
+    var errType, errMsg string
+    var statusCode int
+
+    switch resp.StatusCode {
+    case 401:
+        statusCode = http.StatusBadGateway
+        errType = "upstream_error"
+        errMsg = "Upstream authentication failed, please contact administrator"
+    case 403:
+        statusCode = http.StatusBadGateway
+        errType = "upstream_error"
+        errMsg = "Upstream access forbidden, please contact administrator"
+    case 429:
+        statusCode = http.StatusTooManyRequests
+        errType = "rate_limit_error"
+        errMsg = "Upstream rate limit exceeded, please retry later"
+    default:
+        statusCode = http.StatusBadGateway
+        errType = "upstream_error"
+        errMsg = "Upstream request failed"
+    }
+
+    c.JSON(statusCode, gin.H{
+        "error": gin.H{
+            "type":    errType,
+            "message": errMsg,
+        },
+    })
+
+    return nil, fmt.Errorf("upstream error: %d", resp.StatusCode)
+}
+
+// openaiStreamingResult streaming response result
+type openaiStreamingResult struct {
+    usage        *OpenAIUsage
+    firstTokenMs *int
+}
+
+func (s *OpenAIGatewayService) handleStreamingResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *model.Account, startTime time.Time, originalModel, mappedModel string) (*openaiStreamingResult, error) {
+    // Set SSE response headers
+    c.Header("Content-Type", "text/event-stream")
+    c.Header("Cache-Control", "no-cache")
+    c.Header("Connection", "keep-alive")
+    c.Header("X-Accel-Buffering", "no")
+
+    // Pass through other headers
+    if v := resp.Header.Get("x-request-id"); v != "" {
+        c.Header("x-request-id", v)
+    }
+
+    w := c.Writer
+    flusher, ok := w.(http.Flusher)
+    if !ok {
+        return nil, errors.New("streaming not supported")
+    }
+
+    usage := &OpenAIUsage{}
+    var firstTokenMs *int
+    scanner := bufio.NewScanner(resp.Body)
+    scanner.Buffer(make([]byte, 64*1024), 1024*1024)
+
+    needModelReplace := originalModel != mappedModel
+
+    for scanner.Scan() {
+        line := scanner.Text()
+
+        // Replace model in response if needed
+        if needModelReplace && strings.HasPrefix(line, "data: ") {
+            line = s.replaceModelInSSELine(line, mappedModel, originalModel)
+        }
+
+        // Forward line
+        if _, err := fmt.Fprintf(w, "%s\n", line); err != nil {
+            return &openaiStreamingResult{usage: usage, firstTokenMs: firstTokenMs}, err
+        }
+        flusher.Flush()
+
+        // Parse usage data
+        if strings.HasPrefix(line, "data: ") {
+            data := line[6:]
+            // Record first token time
+            if firstTokenMs == nil && data != "" && data != "[DONE]" {
+                ms := int(time.Since(startTime).Milliseconds())
+                firstTokenMs = &ms
+            }
+            s.parseSSEUsage(data, usage)
+        }
+    }
+
+    if err := scanner.Err(); err != nil {
+        return &openaiStreamingResult{usage: usage, firstTokenMs: firstTokenMs}, fmt.Errorf("stream read error: %w", err)
+    }
+
+    return &openaiStreamingResult{usage: usage, firstTokenMs: firstTokenMs}, nil
+}
+
+func (s *OpenAIGatewayService) replaceModelInSSELine(line, fromModel, toModel string) string {
+    data := line[6:]
+    if data == "" || data == "[DONE]" {
+        return line
+    }
+
+    var event map[string]any
+    if err := json.Unmarshal([]byte(data), &event); err != nil {
+        return line
+    }
+
+    // Replace model in response
+    if m, ok := event["model"].(string); ok && m == fromModel {
+        event["model"] = toModel
+        newData, err := json.Marshal(event)
+        if err != nil {
+            return line
+        }
+        return "data: " + string(newData)
+    }
+
+    // Check nested response
+    if response, ok := event["response"].(map[string]any); ok {
+        if m, ok := response["model"].(string); ok && m == fromModel {
+            response["model"] = toModel
+            newData, err := json.Marshal(event)
+            if err != nil {
+                return line
+            }
+            return "data: " + string(newData)
+        }
+    }
+
+    return line
+}
+
+func (s *OpenAIGatewayService) parseSSEUsage(data string, usage *OpenAIUsage) {
+    // Parse response.completed event for usage (OpenAI Responses format)
+    var event struct {
+        Type     string `json:"type"`
+        Response struct {
+            Usage struct {
+                InputTokens       int `json:"input_tokens"`
+                OutputTokens      int `json:"output_tokens"`
+                InputTokenDetails struct {
+                    CachedTokens int `json:"cached_tokens"`
+                } `json:"input_tokens_details"`
+            } `json:"usage"`
+        } `json:"response"`
+    }
+
+    if json.Unmarshal([]byte(data), &event) == nil && event.Type == "response.completed" {
+        usage.InputTokens = event.Response.Usage.InputTokens
+        usage.OutputTokens = event.Response.Usage.OutputTokens
+        usage.CacheReadInputTokens = event.Response.Usage.InputTokenDetails.CachedTokens
+    }
+}
+
+func (s *OpenAIGatewayService) handleNonStreamingResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *model.Account, originalModel, mappedModel string) (*OpenAIUsage, error) {
+    body, err := io.ReadAll(resp.Body)
+    if err != nil {
+        return nil, err
+    }
+
+    // Parse usage
+    var response struct {
+        Usage struct {
+            InputTokens       int `json:"input_tokens"`
+            OutputTokens      int `json:"output_tokens"`
+            InputTokenDetails struct {
+                CachedTokens int `json:"cached_tokens"`
+            } `json:"input_tokens_details"`
+        } `json:"usage"`
+    }
+    if err := json.Unmarshal(body, &response); err != nil {
+        return nil, fmt.Errorf("parse response: %w", err)
+    }
+
+    usage := &OpenAIUsage{
+        InputTokens:          response.Usage.InputTokens,
+        OutputTokens:         response.Usage.OutputTokens,
+        CacheReadInputTokens: response.Usage.InputTokenDetails.CachedTokens,
+    }
+
+    // Replace model in response if needed
+    if originalModel != mappedModel {
+        body = s.replaceModelInResponseBody(body, mappedModel, originalModel)
+    }
+
+    // Pass through headers
+    for key, values := range resp.Header {
+        for _, value := range values {
+            c.Header(key, value)
+        }
+    }
+
+    c.Data(resp.StatusCode, "application/json", body)
+
+    return usage, nil
+}
+
+func (s *OpenAIGatewayService) replaceModelInResponseBody(body []byte, fromModel, toModel string) []byte {
+    var resp map[string]any
+    if err := json.Unmarshal(body, &resp); err != nil {
+        return body
+    }
+
+    model, ok := resp["model"].(string)
+    if !ok || model != fromModel {
+        return body
+    }
+
+    resp["model"] = toModel
+    newBody, err := json.Marshal(resp)
+    if err != nil {
+        return body
+    }
+
+    return newBody
+}
+
+// OpenAIRecordUsageInput input for recording usage
+type OpenAIRecordUsageInput struct {
+    Result       *OpenAIForwardResult
+    ApiKey       *model.ApiKey
+    User         *model.User
+    Account      *model.Account
+    Subscription *model.UserSubscription
+}
+
+// RecordUsage records usage and deducts balance
+func (s *OpenAIGatewayService) RecordUsage(ctx context.Context, input *OpenAIRecordUsageInput) error {
+    result := input.Result
+    apiKey := input.ApiKey
+    user := input.User
+    account := input.Account
+    subscription := input.Subscription
+
+    // Calculate cost
+    tokens := UsageTokens{
+        InputTokens:         result.Usage.InputTokens,
+        OutputTokens:        result.Usage.OutputTokens,
+        CacheCreationTokens: result.Usage.CacheCreationInputTokens,
+        CacheReadTokens:     result.Usage.CacheReadInputTokens,
+    }
+
+    // Get rate multiplier
+    multiplier := s.cfg.Default.RateMultiplier
+    if apiKey.GroupID != nil && apiKey.Group != nil {
+        multiplier = apiKey.Group.RateMultiplier
+    }
+
+    cost, err := s.billingService.CalculateCost(result.Model, tokens, multiplier)
+    if err != nil {
+        cost = &CostBreakdown{ActualCost: 0}
+    }
+
+    // Determine billing type
+    isSubscriptionBilling := subscription != nil && apiKey.Group != nil && apiKey.Group.IsSubscriptionType()
+    billingType := model.BillingTypeBalance
+    if isSubscriptionBilling {
+        billingType = model.BillingTypeSubscription
+    }
+
+    // Create usage log
+    durationMs := int(result.Duration.Milliseconds())
+    usageLog := &model.UsageLog{
+        UserID:              user.ID,
+        ApiKeyID:            apiKey.ID,
+        AccountID:           account.ID,
+        RequestID:           result.RequestID,
+        Model:               result.Model,
+        InputTokens:         result.Usage.InputTokens,
+        OutputTokens:        result.Usage.OutputTokens,
+        CacheCreationTokens: result.Usage.CacheCreationInputTokens,
+        CacheReadTokens:     result.Usage.CacheReadInputTokens,
+        InputCost:           cost.InputCost,
+        OutputCost:          cost.OutputCost,
+        CacheCreationCost:   cost.CacheCreationCost,
+        CacheReadCost:       cost.CacheReadCost,
+        TotalCost:           cost.TotalCost,
+        ActualCost:          cost.ActualCost,
+        RateMultiplier:      multiplier,
+        BillingType:         billingType,
+        Stream:              result.Stream,
+        DurationMs:          &durationMs,
+        FirstTokenMs:        result.FirstTokenMs,
+        CreatedAt:           time.Now(),
+    }
+
+    if apiKey.GroupID != nil {
+        usageLog.GroupID = apiKey.GroupID
+    }
+    if subscription != nil {
+        usageLog.SubscriptionID = &subscription.ID
+    }
+
+    _ = s.usageLogRepo.Create(ctx, usageLog)
+
+    // Deduct based on billing type
+    if isSubscriptionBilling {
+        if cost.TotalCost > 0 {
+            _ = s.userSubRepo.IncrementUsage(ctx, subscription.ID, cost.TotalCost)
+            go func() {
+                cacheCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+                defer cancel()
+                _ = s.billingCacheService.UpdateSubscriptionUsage(cacheCtx, user.ID, *apiKey.GroupID, cost.TotalCost)
+            }()
+        }
+    } else {
+        if cost.ActualCost > 0 {
+            _ = s.userRepo.DeductBalance(ctx, user.ID, cost.ActualCost)
+            go func() {
+                cacheCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+                defer cancel()
+                _ = s.billingCacheService.DeductBalanceCache(cacheCtx, user.ID, cost.ActualCost)
+            }()
+        }
+    }
+
+    // Update account last used
+    _ = s.accountRepo.UpdateLastUsed(ctx, account.ID)
+
+    return nil
+}
diff --git a/backend/internal/service/openai_oauth_service.go b/backend/internal/service/openai_oauth_service.go
new file mode 100644
index 00000000..a7ce4a24
--- /dev/null
+++ b/backend/internal/service/openai_oauth_service.go
@@ -0,0 +1,257 @@
+package service
+
+import (
+    "context"
+    "fmt"
+    "time"
+
+    "sub2api/internal/model"
+    "sub2api/internal/pkg/openai"
+    "sub2api/internal/service/ports"
+)
+
+// OpenAIOAuthService handles OpenAI OAuth authentication flows
+type OpenAIOAuthService struct {
+    sessionStore *openai.SessionStore
+    proxyRepo    ports.ProxyRepository
+    oauthClient  ports.OpenAIOAuthClient
+}
+
+// NewOpenAIOAuthService creates a new OpenAI OAuth service
+func NewOpenAIOAuthService(proxyRepo ports.ProxyRepository, oauthClient ports.OpenAIOAuthClient) *OpenAIOAuthService {
+    return &OpenAIOAuthService{
+        sessionStore: openai.NewSessionStore(),
+        proxyRepo:    proxyRepo,
+        oauthClient:  oauthClient,
+    }
+}
+
+// OpenAIAuthURLResult contains the authorization URL and session info
+type OpenAIAuthURLResult struct {
+    AuthURL   string `json:"auth_url"`
+    SessionID string `json:"session_id"`
+}
+
+// GenerateAuthURL generates an OpenAI OAuth authorization URL
+func (s *OpenAIOAuthService) GenerateAuthURL(ctx context.Context, proxyID *int64, redirectURI string) (*OpenAIAuthURLResult, error) {
+    // Generate PKCE values
+    state, err := openai.GenerateState()
+    if err != nil {
+        return nil, fmt.Errorf("failed to generate state: %w", err)
+    }
+
+    codeVerifier, err := openai.GenerateCodeVerifier()
+    if err != nil {
+        return nil, fmt.Errorf("failed to generate code verifier: %w", err)
+    }
+
+    codeChallenge := openai.GenerateCodeChallenge(codeVerifier)
+
+    // Generate session ID
+    sessionID, err := openai.GenerateSessionID()
+    if err != nil {
+        return nil, fmt.Errorf("failed to generate session ID: %w", err)
+    }
+
+    // Get proxy URL if specified
+    var proxyURL string
+    if proxyID != nil {
+        proxy, err := s.proxyRepo.GetByID(ctx, *proxyID)
+        if err == nil && proxy != nil {
+            proxyURL = proxy.URL()
+        }
+    }
+
+    // Use default redirect URI if not specified
+    if redirectURI == "" {
+        redirectURI = openai.DefaultRedirectURI
+    }
+
+    // Store session
+    session := &openai.OAuthSession{
+        State:        state,
+        CodeVerifier: codeVerifier,
+        RedirectURI:  redirectURI,
+        ProxyURL:     proxyURL,
+        CreatedAt:    time.Now(),
+    }
+    s.sessionStore.Set(sessionID, session)
+
+    // Build authorization URL
+    authURL := openai.BuildAuthorizationURL(state, codeChallenge, redirectURI)
+
+    return &OpenAIAuthURLResult{
+        AuthURL:   authURL,
+        SessionID: sessionID,
+    }, nil
+}
+
+// OpenAIExchangeCodeInput represents the input for code exchange
+type OpenAIExchangeCodeInput struct {
+    SessionID   string
+    Code        string
+    RedirectURI string
+    ProxyID     *int64
+}
+
+// OpenAITokenInfo represents the token information for OpenAI
+type OpenAITokenInfo struct {
+    AccessToken      string `json:"access_token"`
+    RefreshToken     string `json:"refresh_token"`
+    IDToken          string `json:"id_token,omitempty"`
+    ExpiresIn        int64  `json:"expires_in"`
+    ExpiresAt        int64  `json:"expires_at"`
+    Email            string `json:"email,omitempty"`
+    ChatGPTAccountID string `json:"chatgpt_account_id,omitempty"`
+    ChatGPTUserID    string `json:"chatgpt_user_id,omitempty"`
+    OrganizationID   string `json:"organization_id,omitempty"`
+}
+
+// ExchangeCode exchanges authorization code for tokens
+func (s *OpenAIOAuthService) ExchangeCode(ctx context.Context, input *OpenAIExchangeCodeInput) (*OpenAITokenInfo, error) {
+    // Get session
+    session, ok := s.sessionStore.Get(input.SessionID)
+    if !ok {
+        return nil, fmt.Errorf("session not found or expired")
+    }
+
+    // Get proxy URL
+    proxyURL := session.ProxyURL
+    if input.ProxyID != nil {
+        proxy, err := s.proxyRepo.GetByID(ctx, *input.ProxyID)
+        if err == nil && proxy != nil {
+            proxyURL = proxy.URL()
+        }
+    }
+
+    // Use redirect URI from session or input
+    redirectURI := session.RedirectURI
+    if input.RedirectURI != "" {
+        redirectURI = input.RedirectURI
+    }
+
+    // Exchange code for token
+    tokenResp, err := s.oauthClient.ExchangeCode(ctx, input.Code, session.CodeVerifier, redirectURI, proxyURL)
+    if err != nil {
+        return nil, fmt.Errorf("failed to exchange code: %w", err)
+    }
+
+    // Parse ID token to get user info
+    var userInfo *openai.UserInfo
+    if tokenResp.IDToken != "" {
+        claims, err := openai.ParseIDToken(tokenResp.IDToken)
+        if err == nil {
+            userInfo = claims.GetUserInfo()
+        }
+    }
+
+    // Delete session after successful exchange
+    s.sessionStore.Delete(input.SessionID)
+
+    tokenInfo := &OpenAITokenInfo{
+        AccessToken:  tokenResp.AccessToken,
+        RefreshToken: tokenResp.RefreshToken,
+        IDToken:      tokenResp.IDToken,
+        ExpiresIn:    int64(tokenResp.ExpiresIn),
+        ExpiresAt:    time.Now().Unix() + int64(tokenResp.ExpiresIn),
+    }
+
+    if userInfo != nil {
+        tokenInfo.Email = userInfo.Email
+        tokenInfo.ChatGPTAccountID = userInfo.ChatGPTAccountID
+        tokenInfo.ChatGPTUserID = userInfo.ChatGPTUserID
+        tokenInfo.OrganizationID = userInfo.OrganizationID
+    }
+
+    return tokenInfo, nil
+}
+
+// RefreshToken refreshes an OpenAI OAuth token
+func (s *OpenAIOAuthService) RefreshToken(ctx context.Context, refreshToken string, proxyURL string) (*OpenAITokenInfo, error) {
+    tokenResp, err := s.oauthClient.RefreshToken(ctx, refreshToken, proxyURL)
+    if err != nil {
+        return nil, err
+    }
+
+    // Parse ID token to get user info
+    var userInfo *openai.UserInfo
+    if tokenResp.IDToken != "" {
+        claims, err := openai.ParseIDToken(tokenResp.IDToken)
+        if err == nil {
+            userInfo = claims.GetUserInfo()
+        }
+    }
+
+    tokenInfo := &OpenAITokenInfo{
+        AccessToken:  tokenResp.AccessToken,
+        RefreshToken: tokenResp.RefreshToken,
+        IDToken:      tokenResp.IDToken,
+        ExpiresIn:    int64(tokenResp.ExpiresIn),
+        ExpiresAt:    time.Now().Unix() + int64(tokenResp.ExpiresIn),
+    }
+
+    if userInfo != nil {
+        tokenInfo.Email = userInfo.Email
+        tokenInfo.ChatGPTAccountID = userInfo.ChatGPTAccountID
+        tokenInfo.ChatGPTUserID = userInfo.ChatGPTUserID
+        tokenInfo.OrganizationID = userInfo.OrganizationID
+    }
+
+    return tokenInfo, nil
+}
+
+// RefreshAccountToken refreshes token for an OpenAI account
+func (s *OpenAIOAuthService) RefreshAccountToken(ctx context.Context, account *model.Account) (*OpenAITokenInfo, error) {
+    if !account.IsOpenAI() {
+        return nil, fmt.Errorf("account is not an OpenAI account")
+    }
+
+    refreshToken := account.GetOpenAIRefreshToken()
+    if refreshToken == "" {
+        return nil, fmt.Errorf("no refresh token available")
+    }
+
+    var proxyURL string
+    if account.ProxyID != nil {
+        proxy, err := s.proxyRepo.GetByID(ctx, *account.ProxyID)
+        if err == nil && proxy != nil {
+            proxyURL = proxy.URL()
+        }
+    }
+
+    return s.RefreshToken(ctx, refreshToken, proxyURL)
+}
+
+// BuildAccountCredentials builds credentials map from token info
+func (s *OpenAIOAuthService) BuildAccountCredentials(tokenInfo *OpenAITokenInfo) map[string]any {
+    expiresAt := time.Unix(tokenInfo.ExpiresAt, 0).Format(time.RFC3339)
+
+    creds := map[string]any{
+        "access_token":  tokenInfo.AccessToken,
+        "refresh_token": tokenInfo.RefreshToken,
+        "expires_at":    expiresAt,
+    }
+
+    if tokenInfo.IDToken != "" {
+        creds["id_token"] = tokenInfo.IDToken
+    }
+    if tokenInfo.Email != "" {
+        creds["email"] = tokenInfo.Email
+    }
+    if tokenInfo.ChatGPTAccountID != "" {
+        creds["chatgpt_account_id"] = tokenInfo.ChatGPTAccountID
+    }
+    if tokenInfo.ChatGPTUserID != "" {
+        creds["chatgpt_user_id"] = tokenInfo.ChatGPTUserID
+    }
+    if tokenInfo.OrganizationID != "" {
+        creds["organization_id"] = tokenInfo.OrganizationID
+    }
+
+    return creds
+}
+
+// Stop stops the session store cleanup goroutine
+func (s *OpenAIOAuthService) Stop() {
+    s.sessionStore.Stop()
+}
diff --git a/backend/internal/service/ports/account.go b/backend/internal/service/ports/account.go
index 95b597d1..96788f08 100644
--- a/backend/internal/service/ports/account.go
+++ b/backend/internal/service/ports/account.go
@@ -27,6 +27,8 @@ type AccountRepository interface {
     ListSchedulable(ctx context.Context) ([]model.Account, error)
     ListSchedulableByGroupID(ctx context.Context, groupID int64) ([]model.Account, error)
+    ListSchedulableByPlatform(ctx context.Context, platform string) ([]model.Account, error)
+    ListSchedulableByGroupIDAndPlatform(ctx context.Context, groupID int64, platform string) ([]model.Account, error)
     SetRateLimited(ctx context.Context, id int64, resetAt time.Time) error
     SetOverloaded(ctx context.Context, id int64, until time.Time) error
diff --git a/backend/internal/service/ports/http_upstream.go b/backend/internal/service/ports/http_upstream.go
new file mode 100644
index 00000000..f3c4b369
--- /dev/null
+++ b/backend/internal/service/ports/http_upstream.go
@@ -0,0 +1,9 @@
+package ports
+
+import "net/http"
+
+// HTTPUpstream interface for making HTTP requests to upstream APIs (Claude, OpenAI, etc.)
+// This is a generic interface that can be used for any HTTP-based upstream service.
+type HTTPUpstream interface {
+    Do(req *http.Request, proxyURL string) (*http.Response, error)
+}
diff --git a/backend/internal/service/ports/openai_oauth.go b/backend/internal/service/ports/openai_oauth.go
new file mode 100644
index 00000000..49789669
--- /dev/null
+++ b/backend/internal/service/ports/openai_oauth.go
@@ -0,0 +1,13 @@
+package ports
+
+import (
+    "context"
+
+    "sub2api/internal/pkg/openai"
+)
+
+// OpenAIOAuthClient interface for OpenAI OAuth operations
+type OpenAIOAuthClient interface {
+    ExchangeCode(ctx context.Context, code, codeVerifier, redirectURI, proxyURL string) (*openai.TokenResponse, error)
+    RefreshToken(ctx context.Context, refreshToken, proxyURL string) (*openai.TokenResponse, error)
+}
diff --git a/backend/internal/service/service.go b/backend/internal/service/service.go
index 292a9a7b..638aede7 100644
--- a/backend/internal/service/service.go
+++ b/backend/internal/service/service.go
@@ -2,30 +2,32 @@ package service
 
 // Services is the service collection container
 type Services struct {
-    Auth         *AuthService
-    User         *UserService
-    ApiKey       *ApiKeyService
-    Group        *GroupService
-    Account      *AccountService
-    Proxy        *ProxyService
-    Redeem       *RedeemService
-    Usage        *UsageService
-    Pricing      *PricingService
-    Billing      *BillingService
-    BillingCache *BillingCacheService
-    Admin        AdminService
-    Gateway      *GatewayService
-    OAuth        *OAuthService
-    RateLimit    *RateLimitService
-    AccountUsage *AccountUsageService
-    AccountTest  *AccountTestService
-    Setting      *SettingService
-    Email        *EmailService
-    EmailQueue   *EmailQueueService
-    Turnstile    *TurnstileService
-    Subscription *SubscriptionService
-    Concurrency  *ConcurrencyService
-    Identity     *IdentityService
-    Update       *UpdateService
-    TokenRefresh *TokenRefreshService
+    Auth          *AuthService
+    User          *UserService
+    ApiKey        *ApiKeyService
+    Group         *GroupService
+    Account       *AccountService
+    Proxy         *ProxyService
+    Redeem        *RedeemService
+    Usage         *UsageService
+    Pricing       *PricingService
+    Billing       *BillingService
+    BillingCache  *BillingCacheService
+    Admin         AdminService
+    Gateway       *GatewayService
+    OpenAIGateway *OpenAIGatewayService
+    OAuth         *OAuthService
+    OpenAIOAuth   *OpenAIOAuthService
+    RateLimit     *RateLimitService
+    AccountUsage  *AccountUsageService
+    AccountTest   *AccountTestService
+    Setting       *SettingService
+    Email         *EmailService
+    EmailQueue    *EmailQueueService
+    Turnstile     *TurnstileService
+    Subscription  *SubscriptionService
+    Concurrency   *ConcurrencyService
+    Identity      *IdentityService
+    Update        *UpdateService
+    TokenRefresh  *TokenRefreshService
 }
diff --git a/backend/internal/service/token_refresh_service.go b/backend/internal/service/token_refresh_service.go
index 7445634a..c7889e57 100644
--- a/backend/internal/service/token_refresh_service.go
+++ b/backend/internal/service/token_refresh_service.go
@@ -27,6 +27,7 @@ type TokenRefreshService struct {
 func NewTokenRefreshService(
     accountRepo ports.AccountRepository,
     oauthService *OAuthService,
+    openaiOAuthService *OpenAIOAuthService,
     cfg *config.Config,
 ) *TokenRefreshService {
     s := &TokenRefreshService{
@@ -38,9 +39,7 @@ func NewTokenRefreshService(
     // Register platform-specific refreshers
     s.refreshers = []TokenRefresher{
         NewClaudeTokenRefresher(oauthService),
-        // Refreshers for other platforms can be added in the future:
-        // NewOpenAITokenRefresher(...),
-        // NewGeminiTokenRefresher(...),
+        NewOpenAITokenRefresher(openaiOAuthService),
     }
 
     return s
diff --git a/backend/internal/service/token_refresher.go b/backend/internal/service/token_refresher.go
index 3c152da6..c4e321bb 100644
--- a/backend/internal/service/token_refresher.go
+++ b/backend/internal/service/token_refresher.go
@@ -88,3 +88,54 @@ func (r *ClaudeTokenRefresher) Refresh(ctx context.Context, account *model.Accou
     return newCredentials, nil
 }
+
+// OpenAITokenRefresher handles OpenAI OAuth token refresh
+type OpenAITokenRefresher struct {
+    openaiOAuthService *OpenAIOAuthService
+}
+
+// NewOpenAITokenRefresher creates an OpenAI token refresher
+func NewOpenAITokenRefresher(openaiOAuthService *OpenAIOAuthService) *OpenAITokenRefresher {
+    return &OpenAITokenRefresher{
+        openaiOAuthService: openaiOAuthService,
+    }
+}
+
+// CanRefresh reports whether this refresher can handle the account.
+// It only handles oauth-type accounts on the openai platform.
+func (r *OpenAITokenRefresher) CanRefresh(account *model.Account) bool {
+    return account.Platform == model.PlatformOpenAI &&
+        account.Type == model.AccountTypeOAuth
+}
+
+// NeedsRefresh reports whether the token needs to be refreshed,
+// based on whether expires_at falls within the refresh window.
+func (r *OpenAITokenRefresher) NeedsRefresh(account *model.Account, refreshWindow time.Duration) bool {
+    expiresAt := account.GetOpenAITokenExpiresAt()
+    if expiresAt == nil {
+        return false
+    }
+
+    return time.Until(*expiresAt) < refreshWindow
+}
+
+// Refresh performs the token refresh.
+// It keeps all fields from the existing credentials and only updates token-related fields.
+func (r *OpenAITokenRefresher) Refresh(ctx context.Context, account *model.Account) (map[string]any, error) {
+    tokenInfo, err := r.openaiOAuthService.RefreshAccountToken(ctx, account)
+    if err != nil {
+        return nil, err
+    }
+
+    // Build new credentials via the service helper, then preserve existing fields
+    newCredentials := r.openaiOAuthService.BuildAccountCredentials(tokenInfo)
+
+    // Preserve non-token fields from the existing credentials
+    for k, v := range account.Credentials {
+        if _, exists := newCredentials[k]; !exists {
+            newCredentials[k] = v
+        }
+    }
+
+    return newCredentials, nil
+}
diff --git a/backend/internal/service/wire.go b/backend/internal/service/wire.go
index 4d293352..fa1c1d8f 100644
--- a/backend/internal/service/wire.go
+++ b/backend/internal/service/wire.go
@@ -37,9 +37,10 @@ func ProvideEmailQueueService(emailService *EmailService) *EmailQueueService {
 func ProvideTokenRefreshService(
     accountRepo ports.AccountRepository,
     oauthService *OAuthService,
+    openaiOAuthService *OpenAIOAuthService,
     cfg *config.Config,
 ) *TokenRefreshService {
-    svc := NewTokenRefreshService(accountRepo, oauthService, cfg)
+    svc := NewTokenRefreshService(accountRepo, oauthService, openaiOAuthService, cfg)
     svc.Start()
     return svc
 }
@@ -60,7 +61,9 @@ var ProviderSet = wire.NewSet(
     NewBillingCacheService,
     NewAdminService,
     NewGatewayService,
+    NewOpenAIGatewayService,
     NewOAuthService,
+    NewOpenAIOAuthService,
     NewRateLimitService,
     NewAccountUsageService,
     NewAccountTestService,
diff --git a/frontend/src/components/account/AccountUsageCell.vue b/frontend/src/components/account/AccountUsageCell.vue
index 71c1a05f..d2aee588 100644
--- a/frontend/src/components/account/AccountUsageCell.vue
+++ b/frontend/src/components/account/AccountUsageCell.vue
@@ -1,7 +1,7 @@