This commit introduces a major architectural refactoring to improve quota management, centralize logging, and streamline the relay handling logic. Key changes: - **Pre-consume Quota:** Implements a new mechanism to check and reserve user quota *before* making the request to the upstream provider. This ensures more accurate quota deduction and prevents users from exceeding their limits due to concurrent requests. - **Unified Relay Handlers:** Refactors the relay logic to use generic handlers (e.g., `ChatHandler`, `ImageHandler`) instead of provider-specific implementations. This significantly reduces code duplication and simplifies adding new channels. - **Centralized Logger:** A new dedicated `logger` package is introduced, and all system logging calls are migrated to use it, moving this responsibility out of the `common` package. - **Code Reorganization:** DTOs are generalized (e.g., `dalle.go` -> `openai_image.go`) and utility code is moved to more appropriate packages (e.g., `common/http.go` -> `service/http.go`) for better code structure.
69 lines
1.9 KiB
Go
69 lines
1.9 KiB
Go
package setting
|
|
|
|
import (
|
|
"encoding/json"
|
|
"fmt"
|
|
"math"
|
|
"one-api/logger"
|
|
"sync"
|
|
)
|
|
|
|
// Model request rate limit settings. These package-level values are shared
// across goroutines; ModelRequestRateLimitGroup must only be accessed while
// holding ModelRequestRateLimitMutex.
var (
	// ModelRequestRateLimitEnabled toggles model request rate limiting.
	ModelRequestRateLimitEnabled = false
	// ModelRequestRateLimitDurationMinutes is the rate limit window length in
	// minutes. NOTE(review): presumably a sliding/fixed window — confirm at call site.
	ModelRequestRateLimitDurationMinutes = 1
	// ModelRequestRateLimitCount is the default total-request cap per window.
	ModelRequestRateLimitCount = 0
	// ModelRequestRateLimitSuccessCount is the default successful-request cap per window.
	ModelRequestRateLimitSuccessCount = 1000
	// ModelRequestRateLimitGroup maps a group name to a per-group
	// [totalCount, successCount] override pair.
	ModelRequestRateLimitGroup = map[string][2]int{}
	// ModelRequestRateLimitMutex guards ModelRequestRateLimitGroup.
	ModelRequestRateLimitMutex sync.RWMutex
)
|
|
|
|
func ModelRequestRateLimitGroup2JSONString() string {
|
|
ModelRequestRateLimitMutex.RLock()
|
|
defer ModelRequestRateLimitMutex.RUnlock()
|
|
|
|
jsonBytes, err := json.Marshal(ModelRequestRateLimitGroup)
|
|
if err != nil {
|
|
logger.SysError("error marshalling model ratio: " + err.Error())
|
|
}
|
|
return string(jsonBytes)
|
|
}
|
|
|
|
func UpdateModelRequestRateLimitGroupByJSONString(jsonStr string) error {
|
|
ModelRequestRateLimitMutex.RLock()
|
|
defer ModelRequestRateLimitMutex.RUnlock()
|
|
|
|
ModelRequestRateLimitGroup = make(map[string][2]int)
|
|
return json.Unmarshal([]byte(jsonStr), &ModelRequestRateLimitGroup)
|
|
}
|
|
|
|
func GetGroupRateLimit(group string) (totalCount, successCount int, found bool) {
|
|
ModelRequestRateLimitMutex.RLock()
|
|
defer ModelRequestRateLimitMutex.RUnlock()
|
|
|
|
if ModelRequestRateLimitGroup == nil {
|
|
return 0, 0, false
|
|
}
|
|
|
|
limits, found := ModelRequestRateLimitGroup[group]
|
|
if !found {
|
|
return 0, 0, false
|
|
}
|
|
return limits[0], limits[1], true
|
|
}
|
|
|
|
// CheckModelRequestRateLimitGroup validates a JSON document of the form
// {"group": [totalCount, successCount], ...} without mutating any package
// state. It returns an error when the JSON is malformed, when a total count
// is negative, when a success count is below 1, or when either value exceeds
// math.MaxInt32.
func CheckModelRequestRateLimitGroup(jsonStr string) error {
	candidate := make(map[string][2]int)
	if err := json.Unmarshal([]byte(jsonStr), &candidate); err != nil {
		return err
	}

	for group, limits := range candidate {
		// Fixed message: limits[1] == 0 also fails this check, and zero is
		// not "negative" — describe the actual constraint instead.
		if limits[0] < 0 || limits[1] < 1 {
			return fmt.Errorf("group %s has invalid rate limit values [%d, %d]: total count must be >= 0 and success count must be >= 1", group, limits[0], limits[1])
		}
		if limits[0] > math.MaxInt32 || limits[1] > math.MaxInt32 {
			return fmt.Errorf("group %s [%d, %d] has max rate limits value 2147483647", group, limits[0], limits[1])
		}
	}

	return nil
}
|